Dataset columns (from the dataset viewer header):
- python_code: string, lengths 0 to 1.8M characters
- repo_name: string, 7 distinct values
- file_path: string, lengths 5 to 99 characters
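The three columns above are easiest to read as one record per source file. Below is a minimal sketch of iterating such a dataset, assuming a Hugging Face `datasets`-style layout; the dataset identifier `example/kernel-source` is a placeholder, not the real name.

```python
# Minimal sketch: walk rows of a dataset with the three columns described above.
# "example/kernel-source" is a hypothetical identifier, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("example/kernel-source", split="train")

for row in ds.select(range(3)):
    source = row["python_code"]  # full file contents as a single string (0 to ~1.8M chars)
    repo = row["repo_name"]      # one of 7 distinct repository names, e.g. "linux-master"
    path = row["file_path"]      # repository-relative path, 5 to 99 chars
    print(f"{repo}: {path} ({len(source)} chars)")
```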
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2022, Microsoft Corporation. All rights reserved. */ #include "mana_ib.h" static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev, struct net_device *ndev, mana_handle_t default_rxobj, mana_handle_t ind_table[], u32 log_ind_tbl_size, u32 rx_hash_key_len, u8 *rx_hash_key) { struct mana_port_context *mpc = netdev_priv(ndev); struct mana_cfg_rx_steer_req_v2 *req; struct mana_cfg_rx_steer_resp resp = {}; mana_handle_t *req_indir_tab; struct gdma_context *gc; struct gdma_dev *mdev; u32 req_buf_size; int i, err; mdev = dev->gdma_dev; gc = mdev->gdma_context; req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE; req = kzalloc(req_buf_size, GFP_KERNEL); if (!req) return -ENOMEM; mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, sizeof(resp)); req->hdr.req.msg_version = GDMA_MESSAGE_V2; req->vport = mpc->port_handle; req->rx_enable = 1; req->update_default_rxobj = 1; req->default_rxobj = default_rxobj; req->hdr.dev_id = mdev->dev_id; /* If there are more than 1 entries in indirection table, enable RSS */ if (log_ind_tbl_size) req->rss_enable = true; req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE; req->indir_tab_offset = sizeof(*req); req->update_indir_tab = true; req->cqe_coalescing_enable = 1; req_indir_tab = (mana_handle_t *)(req + 1); /* The ind table passed to the hardware must have * MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb * ind_table to MANA_INDIRECT_TABLE_SIZE if required */ ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size); for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)]; ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i, req_indir_tab[i]); } req->update_hashkey = true; if (rx_hash_key_len) memcpy(req->hashkey, rx_hash_key, rx_hash_key_len); else netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE); ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n", req->vport, default_rxobj); err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp); if (err) { netdev_err(ndev, "Failed to configure vPort RX: %d\n", err); goto out; } if (resp.hdr.status) { netdev_err(ndev, "vPort RX configuration failed: 0x%x\n", resp.hdr.status); err = -EPROTO; goto out; } netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n", mpc->port_handle, log_ind_tbl_size); out: kfree(req); return err; } static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd, struct ib_qp_init_attr *attr, struct ib_udata *udata) { struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); struct mana_ib_dev *mdev = container_of(pd->device, struct mana_ib_dev, ib_dev); struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl; struct mana_ib_create_qp_rss_resp resp = {}; struct mana_ib_create_qp_rss ucmd = {}; struct gdma_dev *gd = mdev->gdma_dev; mana_handle_t *mana_ind_table; struct mana_port_context *mpc; struct mana_context *mc; struct net_device *ndev; struct mana_ib_cq *cq; struct mana_ib_wq *wq; unsigned int ind_tbl_size; struct ib_cq *ibcq; struct ib_wq *ibwq; int i = 0; u32 port; int ret; mc = gd->driver_data; if (!udata || udata->inlen < sizeof(ucmd)) return -EINVAL; ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); if (ret) { ibdev_dbg(&mdev->ib_dev, "Failed copy from udata for create rss-qp, err %d\n", ret); return ret; } if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) { ibdev_dbg(&mdev->ib_dev, "Requested max_recv_wr %d exceeding limit\n", 
attr->cap.max_recv_wr); return -EINVAL; } if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) { ibdev_dbg(&mdev->ib_dev, "Requested max_recv_sge %d exceeding limit\n", attr->cap.max_recv_sge); return -EINVAL; } ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size; if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) { ibdev_dbg(&mdev->ib_dev, "Indirect table size %d exceeding limit\n", ind_tbl_size); return -EINVAL; } if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) { ibdev_dbg(&mdev->ib_dev, "RX Hash function is not supported, %d\n", ucmd.rx_hash_function); return -EINVAL; } /* IB ports start with 1, MANA start with 0 */ port = ucmd.port; if (port < 1 || port > mc->num_ports) { ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n", port); return -EINVAL; } ndev = mc->ports[port - 1]; mpc = netdev_priv(ndev); ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n", ucmd.rx_hash_function, port); mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t), GFP_KERNEL); if (!mana_ind_table) { ret = -ENOMEM; goto fail; } qp->port = port; for (i = 0; i < ind_tbl_size; i++) { struct mana_obj_spec wq_spec = {}; struct mana_obj_spec cq_spec = {}; ibwq = ind_tbl->ind_tbl[i]; wq = container_of(ibwq, struct mana_ib_wq, ibwq); ibcq = ibwq->cq; cq = container_of(ibcq, struct mana_ib_cq, ibcq); wq_spec.gdma_region = wq->gdma_region; wq_spec.queue_size = wq->wq_buf_size; cq_spec.gdma_region = cq->gdma_region; cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE; cq_spec.modr_ctx_id = 0; cq_spec.attached_eq = GDMA_CQ_NO_EQ; ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ, &wq_spec, &cq_spec, &wq->rx_object); if (ret) goto fail; /* The GDMA regions are now owned by the WQ object */ wq->gdma_region = GDMA_INVALID_DMA_REGION; cq->gdma_region = GDMA_INVALID_DMA_REGION; wq->id = wq_spec.queue_index; cq->id = cq_spec.queue_index; ibdev_dbg(&mdev->ib_dev, "ret %d rx_object 0x%llx wq id %llu cq id %llu\n", ret, wq->rx_object, wq->id, cq->id); resp.entries[i].cqid = cq->id; resp.entries[i].wqid = wq->id; mana_ind_table[i] = wq->rx_object; } resp.num_entries = i; ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object, mana_ind_table, ind_tbl->log_ind_tbl_size, ucmd.rx_hash_key_len, ucmd.rx_hash_key); if (ret) goto fail; ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (ret) { ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata create rss-qp, %d\n", ret); goto fail; } kfree(mana_ind_table); return 0; fail: while (i-- > 0) { ibwq = ind_tbl->ind_tbl[i]; wq = container_of(ibwq, struct mana_ib_wq, ibwq); mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object); } kfree(mana_ind_table); return ret; } static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd, struct ib_qp_init_attr *attr, struct ib_udata *udata) { struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd); struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev); struct mana_ib_cq *send_cq = container_of(attr->send_cq, struct mana_ib_cq, ibcq); struct mana_ib_ucontext *mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, ibucontext); struct mana_ib_create_qp_resp resp = {}; struct gdma_dev *gd = mdev->gdma_dev; struct mana_ib_create_qp ucmd = {}; struct mana_obj_spec wq_spec = {}; struct mana_obj_spec cq_spec = {}; struct mana_port_context *mpc; struct mana_context *mc; struct net_device *ndev; struct ib_umem *umem; int err; u32 port; mc = gd->driver_data; if (!mana_ucontext || udata->inlen < sizeof(ucmd)) return 
-EINVAL; err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); if (err) { ibdev_dbg(&mdev->ib_dev, "Failed to copy from udata create qp-raw, %d\n", err); return err; } /* IB ports start with 1, MANA Ethernet ports start with 0 */ port = ucmd.port; if (port < 1 || port > mc->num_ports) return -EINVAL; if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) { ibdev_dbg(&mdev->ib_dev, "Requested max_send_wr %d exceeding limit\n", attr->cap.max_send_wr); return -EINVAL; } if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) { ibdev_dbg(&mdev->ib_dev, "Requested max_send_sge %d exceeding limit\n", attr->cap.max_send_sge); return -EINVAL; } ndev = mc->ports[port - 1]; mpc = netdev_priv(ndev); ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc); err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell); if (err) return -ENODEV; qp->port = port; ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n", ucmd.sq_buf_addr, ucmd.port); umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(umem)) { err = PTR_ERR(umem); ibdev_dbg(&mdev->ib_dev, "Failed to get umem for create qp-raw, err %d\n", err); goto err_free_vport; } qp->sq_umem = umem; err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem, &qp->sq_gdma_region); if (err) { ibdev_dbg(&mdev->ib_dev, "Failed to create dma region for create qp-raw, %d\n", err); goto err_release_umem; } ibdev_dbg(&mdev->ib_dev, "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n", err, qp->sq_gdma_region); /* Create a WQ on the same port handle used by the Ethernet */ wq_spec.gdma_region = qp->sq_gdma_region; wq_spec.queue_size = ucmd.sq_buf_size; cq_spec.gdma_region = send_cq->gdma_region; cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE; cq_spec.modr_ctx_id = 0; cq_spec.attached_eq = GDMA_CQ_NO_EQ; err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec, &cq_spec, &qp->tx_object); if (err) { ibdev_dbg(&mdev->ib_dev, "Failed to create wq for create raw-qp, err %d\n", err); goto err_destroy_dma_region; } /* The GDMA regions are now owned by the WQ object */ qp->sq_gdma_region = GDMA_INVALID_DMA_REGION; send_cq->gdma_region = GDMA_INVALID_DMA_REGION; qp->sq_id = wq_spec.queue_index; send_cq->id = cq_spec.queue_index; ibdev_dbg(&mdev->ib_dev, "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err, qp->tx_object, qp->sq_id, send_cq->id); resp.sqid = qp->sq_id; resp.cqid = send_cq->id; resp.tx_vp_offset = pd->tx_vp_offset; err = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (err) { ibdev_dbg(&mdev->ib_dev, "Failed copy udata for create qp-raw, %d\n", err); goto err_destroy_wq_obj; } return 0; err_destroy_wq_obj: mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object); err_destroy_dma_region: mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region); err_release_umem: ib_umem_release(umem); err_free_vport: mana_ib_uncfg_vport(mdev, pd, port - 1); return err; } int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr, struct ib_udata *udata) { switch (attr->qp_type) { case IB_QPT_RAW_PACKET: /* When rwq_ind_tbl is used, it's for creating WQs for RSS */ if (attr->rwq_ind_tbl) return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr, udata); return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata); default: /* Creating QP other than IB_QPT_RAW_PACKET is not supported */ ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n", attr->qp_type); } return -EINVAL; } int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct 
ib_udata *udata) { /* modify_qp is not supported by this version of the driver */ return -EOPNOTSUPP; } static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp, struct ib_rwq_ind_table *ind_tbl, struct ib_udata *udata) { struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev); struct gdma_dev *gd = mdev->gdma_dev; struct mana_port_context *mpc; struct mana_context *mc; struct net_device *ndev; struct mana_ib_wq *wq; struct ib_wq *ibwq; int i; mc = gd->driver_data; ndev = mc->ports[qp->port - 1]; mpc = netdev_priv(ndev); for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) { ibwq = ind_tbl->ind_tbl[i]; wq = container_of(ibwq, struct mana_ib_wq, ibwq); ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n", wq->rx_object); mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object); } return 0; } static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata) { struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev); struct gdma_dev *gd = mdev->gdma_dev; struct ib_pd *ibpd = qp->ibqp.pd; struct mana_port_context *mpc; struct mana_context *mc; struct net_device *ndev; struct mana_ib_pd *pd; mc = gd->driver_data; ndev = mc->ports[qp->port - 1]; mpc = netdev_priv(ndev); pd = container_of(ibpd, struct mana_ib_pd, ibpd); mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object); if (qp->sq_umem) { mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region); ib_umem_release(qp->sq_umem); } mana_ib_uncfg_vport(mdev, pd, qp->port - 1); return 0; } int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); switch (ibqp->qp_type) { case IB_QPT_RAW_PACKET: if (ibqp->rwq_ind_tbl) return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl, udata); return mana_ib_destroy_qp_raw(qp, udata); default: ibdev_dbg(ibqp->device, "Unexpected QP type %u\n", ibqp->qp_type); } return -ENOENT; }
repo_name: linux-master
file_path: drivers/infiniband/hw/mana/qp.c
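As an illustration of how the `repo_name` and `file_path` fields slice rows like the qp.c record above, here is a hedged sketch using the same placeholder dataset identifier as in the previous example.

```python
# Sketch: select only the MANA InfiniBand driver files among the "linux-master" rows.
# Column names come from the schema at the top of this dump; the dataset identifier
# remains a placeholder.
from datasets import load_dataset

ds = load_dataset("example/kernel-source", split="train")

mana_rows = ds.filter(
    lambda row: row["repo_name"] == "linux-master"
    and row["file_path"].startswith("drivers/infiniband/hw/mana/")
)
for path in mana_rows["file_path"]:
    print(path)  # e.g. drivers/infiniband/hw/mana/qp.c, main.c, device.c, cq.c, mr.c
```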
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2022, Microsoft Corporation. All rights reserved. */ #include "mana_ib.h" void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd, u32 port) { struct gdma_dev *gd = dev->gdma_dev; struct mana_port_context *mpc; struct net_device *ndev; struct mana_context *mc; mc = gd->driver_data; ndev = mc->ports[port]; mpc = netdev_priv(ndev); mutex_lock(&pd->vport_mutex); pd->vport_use_count--; WARN_ON(pd->vport_use_count < 0); if (!pd->vport_use_count) mana_uncfg_vport(mpc); mutex_unlock(&pd->vport_mutex); } int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd, u32 doorbell_id) { struct gdma_dev *mdev = dev->gdma_dev; struct mana_port_context *mpc; struct mana_context *mc; struct net_device *ndev; int err; mc = mdev->driver_data; ndev = mc->ports[port]; mpc = netdev_priv(ndev); mutex_lock(&pd->vport_mutex); pd->vport_use_count++; if (pd->vport_use_count > 1) { ibdev_dbg(&dev->ib_dev, "Skip as this PD is already configured vport\n"); mutex_unlock(&pd->vport_mutex); return 0; } err = mana_cfg_vport(mpc, pd->pdn, doorbell_id); if (err) { pd->vport_use_count--; mutex_unlock(&pd->vport_mutex); ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err); return err; } mutex_unlock(&pd->vport_mutex); pd->tx_shortform_allowed = mpc->tx_shortform_allowed; pd->tx_vp_offset = mpc->tx_vp_offset; ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n", mpc->port_handle, pd->pdn, doorbell_id); return 0; } int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd); struct ib_device *ibdev = ibpd->device; struct gdma_create_pd_resp resp = {}; struct gdma_create_pd_req req = {}; enum gdma_pd_flags flags = 0; struct mana_ib_dev *dev; struct gdma_dev *mdev; int err; dev = container_of(ibdev, struct mana_ib_dev, ib_dev); mdev = dev->gdma_dev; mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req), sizeof(resp)); req.flags = flags; err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { ibdev_dbg(&dev->ib_dev, "Failed to get pd_id err %d status %u\n", err, resp.hdr.status); if (!err) err = -EPROTO; return err; } pd->pd_handle = resp.pd_handle; pd->pdn = resp.pd_id; ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n", pd->pd_handle, pd->pdn); mutex_init(&pd->vport_mutex); pd->vport_use_count = 0; return 0; } int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd); struct ib_device *ibdev = ibpd->device; struct gdma_destory_pd_resp resp = {}; struct gdma_destroy_pd_req req = {}; struct mana_ib_dev *dev; struct gdma_dev *mdev; int err; dev = container_of(ibdev, struct mana_ib_dev, ib_dev); mdev = dev->gdma_dev; mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req), sizeof(resp)); req.pd_handle = pd->pd_handle; err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { ibdev_dbg(&dev->ib_dev, "Failed to destroy pd_handle 0x%llx err %d status %u", pd->pd_handle, err, resp.hdr.status); if (!err) err = -EPROTO; } return err; } static int mana_gd_destroy_doorbell_page(struct gdma_context *gc, int doorbell_page) { struct gdma_destroy_resource_range_req req = {}; struct gdma_resp_hdr resp = {}; int err; mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE, sizeof(req), sizeof(resp)); req.resource_type = 
GDMA_RESOURCE_DOORBELL_PAGE; req.num_resources = 1; req.allocated_resources = doorbell_page; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.status) { dev_err(gc->dev, "Failed to destroy doorbell page: ret %d, 0x%x\n", err, resp.status); return err ?: -EPROTO; } return 0; } static int mana_gd_allocate_doorbell_page(struct gdma_context *gc, int *doorbell_page) { struct gdma_allocate_resource_range_req req = {}; struct gdma_allocate_resource_range_resp resp = {}; int err; mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE, sizeof(req), sizeof(resp)); req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE; req.num_resources = 1; req.alignment = 1; /* Have GDMA start searching from 0 */ req.allocated_resources = 0; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { dev_err(gc->dev, "Failed to allocate doorbell page: ret %d, 0x%x\n", err, resp.hdr.status); return err ?: -EPROTO; } *doorbell_page = resp.allocated_resources; return 0; } int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext, struct ib_udata *udata) { struct mana_ib_ucontext *ucontext = container_of(ibcontext, struct mana_ib_ucontext, ibucontext); struct ib_device *ibdev = ibcontext->device; struct mana_ib_dev *mdev; struct gdma_context *gc; struct gdma_dev *dev; int doorbell_page; int ret; mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); dev = mdev->gdma_dev; gc = dev->gdma_context; /* Allocate a doorbell page index */ ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page); if (ret) { ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret); return ret; } ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page); ucontext->doorbell = doorbell_page; return 0; } void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct mana_ib_ucontext *mana_ucontext = container_of(ibcontext, struct mana_ib_ucontext, ibucontext); struct ib_device *ibdev = ibcontext->device; struct mana_ib_dev *mdev; struct gdma_context *gc; int ret; mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); gc = mdev->gdma_dev->gdma_context; ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell); if (ret) ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret); } static int mana_ib_gd_first_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc, struct gdma_create_dma_region_req *create_req, size_t num_pages, mana_handle_t *gdma_region, u32 expected_status) { struct gdma_create_dma_region_resp create_resp = {}; unsigned int create_req_msg_size; int err; create_req_msg_size = struct_size(create_req, page_addr_list, num_pages); create_req->page_addr_list_len = num_pages; err = mana_gd_send_request(gc, create_req_msg_size, create_req, sizeof(create_resp), &create_resp); if (err || create_resp.hdr.status != expected_status) { ibdev_dbg(&dev->ib_dev, "Failed to create DMA region: %d, 0x%x\n", err, create_resp.hdr.status); if (!err) err = -EPROTO; return err; } *gdma_region = create_resp.dma_region_handle; ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n", *gdma_region); return 0; } static int mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc, struct gdma_dma_region_add_pages_req *add_req, unsigned int num_pages, u32 expected_status) { unsigned int add_req_msg_size = struct_size(add_req, page_addr_list, num_pages); struct gdma_general_resp add_resp = {}; int err; mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES, add_req_msg_size, sizeof(add_resp)); add_req->page_addr_list_len = 
num_pages; err = mana_gd_send_request(gc, add_req_msg_size, add_req, sizeof(add_resp), &add_resp); if (err || add_resp.hdr.status != expected_status) { ibdev_dbg(&dev->ib_dev, "Failed to create DMA region: %d, 0x%x\n", err, add_resp.hdr.status); if (!err) err = -EPROTO; return err; } return 0; } int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem, mana_handle_t *gdma_region) { struct gdma_dma_region_add_pages_req *add_req = NULL; size_t num_pages_processed = 0, num_pages_to_handle; struct gdma_create_dma_region_req *create_req; unsigned int create_req_msg_size; struct hw_channel_context *hwc; struct ib_block_iter biter; size_t max_pgs_add_cmd = 0; size_t max_pgs_create_cmd; struct gdma_context *gc; size_t num_pages_total; struct gdma_dev *mdev; unsigned long page_sz; unsigned int tail = 0; u64 *page_addr_list; void *request_buf; int err; mdev = dev->gdma_dev; gc = mdev->gdma_context; hwc = gc->hwc.driver_data; /* Hardware requires dma region to align to chosen page size */ page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0); if (!page_sz) { ibdev_dbg(&dev->ib_dev, "failed to find page size.\n"); return -ENOMEM; } num_pages_total = ib_umem_num_dma_blocks(umem, page_sz); max_pgs_create_cmd = (hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64); num_pages_to_handle = min_t(size_t, num_pages_total, max_pgs_create_cmd); create_req_msg_size = struct_size(create_req, page_addr_list, num_pages_to_handle); request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL); if (!request_buf) return -ENOMEM; create_req = request_buf; mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION, create_req_msg_size, sizeof(struct gdma_create_dma_region_resp)); create_req->length = umem->length; create_req->offset_in_page = umem->address & (page_sz - 1); create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT; create_req->page_count = num_pages_total; ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n", umem->length, num_pages_total); ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n", page_sz, create_req->offset_in_page); ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u", num_pages_to_handle, create_req->gdma_page_type); page_addr_list = create_req->page_addr_list; rdma_umem_for_each_dma_block(umem, &biter, page_sz) { u32 expected_status = 0; page_addr_list[tail++] = rdma_block_iter_dma_address(&biter); if (tail < num_pages_to_handle) continue; if (num_pages_processed + num_pages_to_handle < num_pages_total) expected_status = GDMA_STATUS_MORE_ENTRIES; if (!num_pages_processed) { /* First create message */ err = mana_ib_gd_first_dma_region(dev, gc, create_req, tail, gdma_region, expected_status); if (err) goto out; max_pgs_add_cmd = (hwc->max_req_msg_size - sizeof(*add_req)) / sizeof(u64); add_req = request_buf; add_req->dma_region_handle = *gdma_region; add_req->reserved3 = 0; page_addr_list = add_req->page_addr_list; } else { /* Subsequent create messages */ err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail, expected_status); if (err) break; } num_pages_processed += tail; tail = 0; /* The remaining pages to create */ num_pages_to_handle = min_t(size_t, num_pages_total - num_pages_processed, max_pgs_add_cmd); } if (err) mana_ib_gd_destroy_dma_region(dev, *gdma_region); out: kfree(request_buf); return err; } int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region) { struct gdma_dev *mdev = dev->gdma_dev; struct gdma_context *gc; gc = mdev->gdma_context; ibdev_dbg(&dev->ib_dev, "destroy dma region 
0x%llx\n", gdma_region); return mana_gd_destroy_dma_region(gc, gdma_region); } int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) { struct mana_ib_ucontext *mana_ucontext = container_of(ibcontext, struct mana_ib_ucontext, ibucontext); struct ib_device *ibdev = ibcontext->device; struct mana_ib_dev *mdev; struct gdma_context *gc; phys_addr_t pfn; pgprot_t prot; int ret; mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); gc = mdev->gdma_dev->gdma_context; if (vma->vm_pgoff != 0) { ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff); return -EINVAL; } /* Map to the page indexed by ucontext->doorbell */ pfn = (gc->phys_db_page_base + gc->db_page_size * mana_ucontext->doorbell) >> PAGE_SHIFT; prot = pgprot_writecombine(vma->vm_page_prot); ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot, NULL); if (ret) ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret); else ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n", pfn, gc->db_page_size, ret); return ret; } int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { /* * This version only support RAW_PACKET * other values need to be filled for other types */ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; return 0; } int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { props->max_qp = MANA_MAX_NUM_QUEUES; props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE; /* * max_cqe could be potentially much bigger. * As this version of driver only support RAW QP, set it to the same * value as max_qp_wr */ props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE; props->max_mr_size = MANA_IB_MAX_MR_SIZE; props->max_mr = MANA_IB_MAX_MR; props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES; props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES; return 0; } int mana_ib_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { /* This version doesn't return port properties */ return 0; } int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid) { /* This version doesn't return GID properties */ return 0; } void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) { }
repo_name: linux-master
file_path: drivers/infiniband/hw/mana/main.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2022, Microsoft Corporation. All rights reserved. */ #include "mana_ib.h" #include <net/mana/mana_auxiliary.h> MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(NET_MANA); static const struct ib_device_ops mana_ib_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_MANA, .uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION, .alloc_pd = mana_ib_alloc_pd, .alloc_ucontext = mana_ib_alloc_ucontext, .create_cq = mana_ib_create_cq, .create_qp = mana_ib_create_qp, .create_rwq_ind_table = mana_ib_create_rwq_ind_table, .create_wq = mana_ib_create_wq, .dealloc_pd = mana_ib_dealloc_pd, .dealloc_ucontext = mana_ib_dealloc_ucontext, .dereg_mr = mana_ib_dereg_mr, .destroy_cq = mana_ib_destroy_cq, .destroy_qp = mana_ib_destroy_qp, .destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table, .destroy_wq = mana_ib_destroy_wq, .disassociate_ucontext = mana_ib_disassociate_ucontext, .get_port_immutable = mana_ib_get_port_immutable, .mmap = mana_ib_mmap, .modify_qp = mana_ib_modify_qp, .modify_wq = mana_ib_modify_wq, .query_device = mana_ib_query_device, .query_gid = mana_ib_query_gid, .query_port = mana_ib_query_port, .reg_user_mr = mana_ib_reg_user_mr, INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp), INIT_RDMA_OBJ_SIZE(ib_ucontext, mana_ib_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mana_ib_rwq_ind_table, ib_ind_table), }; static int mana_ib_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct mana_adev *madev = container_of(adev, struct mana_adev, adev); struct gdma_dev *mdev = madev->mdev; struct mana_context *mc; struct mana_ib_dev *dev; int ret; mc = mdev->driver_data; dev = ib_alloc_device(mana_ib_dev, ib_dev); if (!dev) return -ENOMEM; ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops); dev->ib_dev.phys_port_cnt = mc->num_ports; ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev, mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt); dev->gdma_dev = mdev; dev->ib_dev.node_type = RDMA_NODE_IB_CA; /* * num_comp_vectors needs to set to the max MSIX index * when interrupts and event queues are implemented */ dev->ib_dev.num_comp_vectors = 1; dev->ib_dev.dev.parent = mdev->gdma_context->dev; ret = ib_register_device(&dev->ib_dev, "mana_%d", mdev->gdma_context->dev); if (ret) { ib_dealloc_device(&dev->ib_dev); return ret; } dev_set_drvdata(&adev->dev, dev); return 0; } static void mana_ib_remove(struct auxiliary_device *adev) { struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev); ib_unregister_device(&dev->ib_dev); ib_dealloc_device(&dev->ib_dev); } static const struct auxiliary_device_id mana_id_table[] = { { .name = "mana.rdma", }, {}, }; MODULE_DEVICE_TABLE(auxiliary, mana_id_table); static struct auxiliary_driver mana_driver = { .name = "rdma", .probe = mana_ib_probe, .remove = mana_ib_remove, .id_table = mana_id_table, }; module_auxiliary_driver(mana_driver);
repo_name: linux-master
file_path: drivers/infiniband/hw/mana/device.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2022, Microsoft Corporation. All rights reserved. */ #include "mana_ib.h" int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); struct ib_device *ibdev = ibcq->device; struct mana_ib_create_cq ucmd = {}; struct mana_ib_dev *mdev; int err; mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); if (udata->inlen < sizeof(ucmd)) return -EINVAL; err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); if (err) { ibdev_dbg(ibdev, "Failed to copy from udata for create cq, %d\n", err); return err; } if (attr->cqe > MAX_SEND_BUFFERS_PER_QUEUE) { ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe); return -EINVAL; } cq->cqe = attr->cqe; cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(cq->umem)) { err = PTR_ERR(cq->umem); ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n", err); return err; } err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region); if (err) { ibdev_dbg(ibdev, "Failed to create dma region for create cq, %d\n", err); goto err_release_umem; } ibdev_dbg(ibdev, "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n", err, cq->gdma_region); /* * The CQ ID is not known at this time. The ID is generated at create_qp */ return 0; err_release_umem: ib_umem_release(cq->umem); return err; } int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); struct ib_device *ibdev = ibcq->device; struct mana_ib_dev *mdev; mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region); ib_umem_release(cq->umem); return 0; }
repo_name: linux-master
file_path: drivers/infiniband/hw/mana/cq.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2022, Microsoft Corporation. All rights reserved. */ #include "mana_ib.h" #define VALID_MR_FLAGS \ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ) static enum gdma_mr_access_flags mana_ib_verbs_to_gdma_access_flags(int access_flags) { enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ; if (access_flags & IB_ACCESS_LOCAL_WRITE) flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE; if (access_flags & IB_ACCESS_REMOTE_WRITE) flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE; if (access_flags & IB_ACCESS_REMOTE_READ) flags |= GDMA_ACCESS_FLAG_REMOTE_READ; return flags; } static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr, struct gdma_create_mr_params *mr_params) { struct gdma_create_mr_response resp = {}; struct gdma_create_mr_request req = {}; struct gdma_dev *mdev = dev->gdma_dev; struct gdma_context *gc; int err; gc = mdev->gdma_context; mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req), sizeof(resp)); req.pd_handle = mr_params->pd_handle; req.mr_type = mr_params->mr_type; switch (mr_params->mr_type) { case GDMA_MR_TYPE_GVA: req.gva.dma_region_handle = mr_params->gva.dma_region_handle; req.gva.virtual_address = mr_params->gva.virtual_address; req.gva.access_flags = mr_params->gva.access_flags; break; default: ibdev_dbg(&dev->ib_dev, "invalid param (GDMA_MR_TYPE) passed, type %d\n", req.mr_type); return -EINVAL; } err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err, resp.hdr.status); if (!err) err = -EPROTO; return err; } mr->ibmr.lkey = resp.lkey; mr->ibmr.rkey = resp.rkey; mr->mr_handle = resp.mr_handle; return 0; } static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle) { struct gdma_destroy_mr_response resp = {}; struct gdma_destroy_mr_request req = {}; struct gdma_dev *mdev = dev->gdma_dev; struct gdma_context *gc; int err; gc = mdev->gdma_context; mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req), sizeof(resp)); req.mr_handle = mr_handle; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err, resp.hdr.status); if (!err) err = -EPROTO; return err; } return 0; } struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, u64 iova, int access_flags, struct ib_udata *udata) { struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd); struct gdma_create_mr_params mr_params = {}; struct ib_device *ibdev = ibpd->device; struct mana_ib_dev *dev; struct mana_ib_mr *mr; u64 dma_region_handle; int err; dev = container_of(ibdev, struct mana_ib_dev, ib_dev); ibdev_dbg(ibdev, "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x", start, iova, length, access_flags); if (access_flags & ~VALID_MR_FLAGS) return ERR_PTR(-EINVAL); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->umem = ib_umem_get(ibdev, start, length, access_flags); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); ibdev_dbg(ibdev, "Failed to get umem for register user-mr, %d\n", err); goto err_free; } err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle); if (err) { ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n", err); goto err_umem; } ibdev_dbg(ibdev, "mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err, dma_region_handle); mr_params.pd_handle = pd->pd_handle; mr_params.mr_type = GDMA_MR_TYPE_GVA; 
mr_params.gva.dma_region_handle = dma_region_handle; mr_params.gva.virtual_address = iova; mr_params.gva.access_flags = mana_ib_verbs_to_gdma_access_flags(access_flags); err = mana_ib_gd_create_mr(dev, mr, &mr_params); if (err) goto err_dma_region; /* * There is no need to keep track of dma_region_handle after MR is * successfully created. The dma_region_handle is tracked in the PF * as part of the lifecycle of this MR. */ return &mr->ibmr; err_dma_region: mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context, dma_region_handle); err_umem: ib_umem_release(mr->umem); err_free: kfree(mr); return ERR_PTR(err); } int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr); struct ib_device *ibdev = ibmr->device; struct mana_ib_dev *dev; int err; dev = container_of(ibdev, struct mana_ib_dev, ib_dev); err = mana_ib_gd_destroy_mr(dev, mr->mr_handle); if (err) return err; if (mr->umem) ib_umem_release(mr->umem); kfree(mr); return 0; }
repo_name: linux-master
file_path: drivers/infiniband/hw/mana/mr.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2018 Intel Corporation. */ #include <linux/spinlock.h> #include "hfi.h" #include "mad.h" #include "qp.h" #include "verbs_txreq.h" #include "trace.h" static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) { return (gid->global.interface_id == id && (gid->global.subnet_prefix == gid_prefix || gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX)); } /* * * This should be called with the QP r_lock held. * * The s_lock will be acquired around the hfi1_migrate_qp() call. */ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet) { __be64 guid; unsigned long flags; struct rvt_qp *qp = packet->qp; u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; u32 dlid = packet->dlid; u32 slid = packet->slid; u32 sl = packet->sl; bool migrated = packet->migrated; u16 pkey = packet->pkey; if (qp->s_mig_state == IB_MIG_ARMED && migrated) { if (!packet->grh) { if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) & IB_AH_GRH) && (packet->etype != RHF_RCV_TYPE_BYPASS)) return 1; } else { const struct ib_global_route *grh; if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) & IB_AH_GRH)) return 1; grh = rdma_ah_read_grh(&qp->alt_ah_attr); guid = get_sguid(ibp, grh->sgid_index); if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix, guid)) return 1; if (!gid_ok( &packet->grh->sgid, grh->dgid.global.subnet_prefix, grh->dgid.global.interface_id)) return 1; } if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey, sc5, slid))) { hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num, slid, dlid); return 1; } /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */ if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) || ppd_from_ibp(ibp)->port != rdma_ah_get_port_num(&qp->alt_ah_attr)) return 1; spin_lock_irqsave(&qp->s_lock, flags); hfi1_migrate_qp(qp); spin_unlock_irqrestore(&qp->s_lock, flags); } else { if (!packet->grh) { if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && (packet->etype != RHF_RCV_TYPE_BYPASS)) return 1; } else { const struct ib_global_route *grh; if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) return 1; grh = rdma_ah_read_grh(&qp->remote_ah_attr); guid = get_sguid(ibp, grh->sgid_index); if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix, guid)) return 1; if (!gid_ok( &packet->grh->sgid, grh->dgid.global.subnet_prefix, grh->dgid.global.interface_id)) return 1; } if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey, sc5, slid))) { hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num, slid, dlid); return 1; } /* Validate the SLID. See Ch. 9.6.1.5 */ if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) || ppd_from_ibp(ibp)->port != qp->port_num) return 1; if (qp->s_mig_state == IB_MIG_REARM && !migrated) qp->s_mig_state = IB_MIG_ARMED; } return 0; } /** * hfi1_make_grh - construct a GRH header * @ibp: a pointer to the IB port * @hdr: a pointer to the GRH header being constructed * @grh: the global route address to send to * @hwords: size of header after grh being sent in dwords * @nwords: the number of 32 bit words of data being sent * * Return the size of the header in 32 bit words. */ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, const struct ib_global_route *grh, u32 hwords, u32 nwords) { hdr->version_tclass_flow = cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) | (grh->traffic_class << IB_GRH_TCLASS_SHIFT) | (grh->flow_label << IB_GRH_FLOW_SHIFT)); hdr->paylen = cpu_to_be16((hwords + nwords) << 2); /* next_hdr is defined by C8-7 in ch. 
8.4.1 */ hdr->next_hdr = IB_GRH_NEXT_HDR; hdr->hop_limit = grh->hop_limit; /* The SGID is 32-bit aligned. */ hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix; hdr->sgid.global.interface_id = grh->sgid_index < HFI1_GUIDS_PER_PORT ? get_sguid(ibp, grh->sgid_index) : get_sguid(ibp, HFI1_PORT_GUID_INDEX); hdr->dgid = grh->dgid; /* GRH header size in 32-bit words. */ return sizeof(struct ib_grh) / sizeof(u32); } #define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \ hdr.ibh.u.oth.bth[2]) / 4) /** * build_ahg - create ahg in s_ahg * @qp: a pointer to QP * @npsn: the next PSN for the request/response * * This routine handles the AHG by allocating an ahg entry and causing the * copy of the first middle. * * Subsequent middles use the copied entry, editing the * PSN with 1 or 2 edits. */ static inline void build_ahg(struct rvt_qp *qp, u32 npsn) { struct hfi1_qp_priv *priv = qp->priv; if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR)) clear_ahg(qp); if (!(qp->s_flags & HFI1_S_AHG_VALID)) { /* first middle that needs copy */ if (qp->s_ahgidx < 0) qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde); if (qp->s_ahgidx >= 0) { qp->s_ahgpsn = npsn; priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY; /* save to protect a change in another thread */ priv->s_ahg->ahgidx = qp->s_ahgidx; qp->s_flags |= HFI1_S_AHG_VALID; } } else { /* subsequent middle after valid */ if (qp->s_ahgidx >= 0) { priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG; priv->s_ahg->ahgidx = qp->s_ahgidx; priv->s_ahg->ahgcount++; priv->s_ahg->ahgdesc[0] = sdma_build_ahg_descriptor( (__force u16)cpu_to_be16((u16)npsn), BTH2_OFFSET, 16, 16); if ((npsn & 0xffff0000) != (qp->s_ahgpsn & 0xffff0000)) { priv->s_ahg->ahgcount++; priv->s_ahg->ahgdesc[1] = sdma_build_ahg_descriptor( (__force u16)cpu_to_be16( (u16)(npsn >> 16)), BTH2_OFFSET, 0, 16); } } } } static inline void hfi1_make_ruc_bth(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1, u32 bth2) { ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(bth1); ohdr->bth[2] = cpu_to_be32(bth2); } /** * hfi1_make_ruc_header_16B - build a 16B header * @qp: the queue pair * @ohdr: a pointer to the destination header memory * @bth0: bth0 passed in from the RC/UC builder * @bth1: bth1 passed in from the RC/UC builder * @bth2: bth2 passed in from the RC/UC builder * @middle: non zero implies indicates ahg "could" be used * @ps: the current packet state * * This routine may disarm ahg under these situations: * - packet needs a GRH * - BECN needed * - migration state not IB_MIG_MIGRATED */ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1, u32 bth2, int middle, struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibport *ibp = ps->ibp; struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 slid; u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); u8 l4 = OPA_16B_L4_IB_LOCAL; u8 extra_bytes = hfi1_get_16b_padding( (ps->s_txreq->hdr_dwords << 2), ps->s_txreq->s_cur_size); u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size + extra_bytes + SIZE_OF_LT) >> 2); bool becn = false; if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) { struct ib_grh *grh; struct ib_global_route *grd = rdma_ah_retrieve_grh(&qp->remote_ah_attr); /* * Ensure OPA GIDs are transformed to IB gids * before creating the GRH. 
*/ if (grd->sgid_index == OPA_GID_INDEX) grd->sgid_index = 0; grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh; l4 = OPA_16B_L4_IB_GLOBAL; ps->s_txreq->hdr_dwords += hfi1_make_grh(ibp, grh, grd, ps->s_txreq->hdr_dwords - LRH_16B_DWORDS, nwords); middle = 0; } if (qp->s_mig_state == IB_MIG_MIGRATED) bth1 |= OPA_BTH_MIG_REQ; else middle = 0; if (qp->s_flags & RVT_S_ECN) { qp->s_flags &= ~RVT_S_ECN; /* we recently received a FECN, so return a BECN */ becn = true; middle = 0; } if (middle) build_ahg(qp, bth2); else qp->s_flags &= ~HFI1_S_AHG_VALID; bth0 |= pkey; bth0 |= extra_bytes << 20; hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); if (!ppd->lid) slid = be32_to_cpu(OPA_LID_PERMISSIVE); else slid = ppd->lid | (rdma_ah_get_path_bits(&qp->remote_ah_attr) & ((1 << ppd->lmc) - 1)); hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah, slid, opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 16B), (ps->s_txreq->hdr_dwords + nwords) >> 1, pkey, becn, 0, l4, priv->s_sc); } /** * hfi1_make_ruc_header_9B - build a 9B header * @qp: the queue pair * @ohdr: a pointer to the destination header memory * @bth0: bth0 passed in from the RC/UC builder * @bth1: bth1 passed in from the RC/UC builder * @bth2: bth2 passed in from the RC/UC builder * @middle: non zero implies indicates ahg "could" be used * @ps: the current packet state * * This routine may disarm ahg under these situations: * - packet needs a GRH * - BECN needed * - migration state not IB_MIG_MIGRATED */ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1, u32 bth2, int middle, struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibport *ibp = ps->ibp; u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); u16 lrh0 = HFI1_LRH_BTH; u8 extra_bytes = -ps->s_txreq->s_cur_size & 3; u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size + extra_bytes) >> 2); if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) { struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh; lrh0 = HFI1_LRH_GRH; ps->s_txreq->hdr_dwords += hfi1_make_grh(ibp, grh, rdma_ah_read_grh(&qp->remote_ah_attr), ps->s_txreq->hdr_dwords - LRH_9B_DWORDS, nwords); middle = 0; } lrh0 |= (priv->s_sc & 0xf) << 12 | (rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4; if (qp->s_mig_state == IB_MIG_MIGRATED) bth0 |= IB_BTH_MIG_REQ; else middle = 0; if (qp->s_flags & RVT_S_ECN) { qp->s_flags &= ~RVT_S_ECN; /* we recently received a FECN, so return a BECN */ bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT); middle = 0; } if (middle) build_ahg(qp, bth2); else qp->s_flags &= ~HFI1_S_AHG_VALID; bth0 |= pkey; bth0 |= extra_bytes << 20; hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh, lrh0, ps->s_txreq->hdr_dwords + nwords, opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B), ppd_from_ibp(ibp)->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr)); } typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1, u32 bth2, int middle, struct hfi1_pkt_state *ps); /* We support only two types - 9B and 16B for now */ static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = { [HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B, [HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B }; void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1, u32 bth2, int middle, struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; /* * reset s_ahg/AHG fields * * This insures that the ahgentry/ahgcount * are at a non-AHG default to 
protect * build_verbs_tx_desc() from using * an include ahgidx. * * build_ahg() will modify as appropriate * to use the AHG feature. */ priv->s_ahg->tx_flags = 0; priv->s_ahg->ahgcount = 0; priv->s_ahg->ahgidx = 0; /* Make the appropriate header */ hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle, ps); } /* when sending, force a reschedule every one of these periods */ #define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */ /** * hfi1_schedule_send_yield - test for a yield required for QP * send engine * @qp: a pointer to QP * @ps: a pointer to a structure with commonly lookup values for * the send engine progress * @tid: true if it is the tid leg * * This routine checks if the time slice for the QP has expired * for RC QPs, if so an additional work entry is queued. At this * point, other QPs have an opportunity to be scheduled. It * returns true if a yield is required, otherwise, false * is returned. */ bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps, bool tid) { ps->pkts_sent = true; if (unlikely(time_after(jiffies, ps->timeout))) { if (!ps->in_thread || workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) { spin_lock_irqsave(&qp->s_lock, ps->flags); if (!tid) { qp->s_flags &= ~RVT_S_BUSY; hfi1_schedule_send(qp); } else { struct hfi1_qp_priv *priv = qp->priv; if (priv->s_flags & HFI1_S_TID_BUSY_SET) { qp->s_flags &= ~RVT_S_BUSY; priv->s_flags &= ~(HFI1_S_TID_BUSY_SET | RVT_S_BUSY); } else { priv->s_flags &= ~RVT_S_BUSY; } hfi1_schedule_tid_send(qp); } spin_unlock_irqrestore(&qp->s_lock, ps->flags); this_cpu_inc(*ps->ppd->dd->send_schedule); trace_hfi1_rc_expired_time_slice(qp, true); return true; } cond_resched(); this_cpu_inc(*ps->ppd->dd->send_schedule); ps->timeout = jiffies + ps->timeout_int; } trace_hfi1_rc_expired_time_slice(qp, false); return false; } void hfi1_do_send_from_rvt(struct rvt_qp *qp) { hfi1_do_send(qp, false); } void _hfi1_do_send(struct work_struct *work) { struct iowait_work *w = container_of(work, struct iowait_work, iowork); struct rvt_qp *qp = iowait_to_qp(w->iow); hfi1_do_send(qp, true); } /** * hfi1_do_send - perform a send on a QP * @qp: a pointer to the QP * @in_thread: true if in a workqueue thread * * Process entries in the send work queue until credit or queue is * exhausted. Only allow one CPU to send a packet per QP. * Otherwise, two threads could send packets out of order. */ void hfi1_do_send(struct rvt_qp *qp, bool in_thread) { struct hfi1_pkt_state ps; struct hfi1_qp_priv *priv = qp->priv; int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps); ps.dev = to_idev(qp->ibqp.device); ps.ibp = to_iport(qp->ibqp.device, qp->port_num); ps.ppd = ppd_from_ibp(ps.ibp); ps.in_thread = in_thread; ps.wait = iowait_get_ib_work(&priv->s_iowait); trace_hfi1_rc_do_send(qp, in_thread); switch (qp->ibqp.qp_type) { case IB_QPT_RC: if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ps.ppd->lmc) - 1)) == ps.ppd->lid)) { rvt_ruc_loopback(qp); return; } make_req = hfi1_make_rc_req; ps.timeout_int = qp->timeout_jiffies; break; case IB_QPT_UC: if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ps.ppd->lmc) - 1)) == ps.ppd->lid)) { rvt_ruc_loopback(qp); return; } make_req = hfi1_make_uc_req; ps.timeout_int = SEND_RESCHED_TIMEOUT; break; default: make_req = hfi1_make_ud_req; ps.timeout_int = SEND_RESCHED_TIMEOUT; } spin_lock_irqsave(&qp->s_lock, ps.flags); /* Return if we are already busy processing a work request. 
*/ if (!hfi1_send_ok(qp)) { if (qp->s_flags & HFI1_S_ANY_WAIT_IO) iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB); spin_unlock_irqrestore(&qp->s_lock, ps.flags); return; } qp->s_flags |= RVT_S_BUSY; ps.timeout_int = ps.timeout_int / 8; ps.timeout = jiffies + ps.timeout_int; ps.cpu = priv->s_sde ? priv->s_sde->cpu : cpumask_first(cpumask_of_node(ps.ppd->dd->node)); ps.pkts_sent = false; /* insure a pre-built packet is handled */ ps.s_txreq = get_waiting_verbs_txreq(ps.wait); do { /* Check for a constructed packet to be sent. */ if (ps.s_txreq) { if (priv->s_flags & HFI1_S_TID_BUSY_SET) qp->s_flags |= RVT_S_BUSY; spin_unlock_irqrestore(&qp->s_lock, ps.flags); /* * If the packet cannot be sent now, return and * the send engine will be woken up later. */ if (hfi1_verbs_send(qp, &ps)) return; /* allow other tasks to run */ if (hfi1_schedule_send_yield(qp, &ps, false)) return; spin_lock_irqsave(&qp->s_lock, ps.flags); } } while (make_req(qp, &ps)); iowait_starve_clear(ps.pkts_sent, &priv->s_iowait); spin_unlock_irqrestore(&qp->s_lock, ps.flags); }
repo_name: linux-master
file_path: drivers/infiniband/hw/hfi1/ruc.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. */ #define CREATE_TRACE_POINTS #include "trace.h" #include "exp_rcv.h" #include "ipoib.h" static u8 __get_ib_hdr_len(struct ib_header *hdr) { struct ib_other_headers *ohdr; u8 opcode; if (ib_get_lnh(hdr) == HFI1_LRH_BTH) ohdr = &hdr->u.oth; else ohdr = &hdr->u.l.oth; opcode = ib_bth_get_opcode(ohdr); return hdr_len_by_opcode[opcode] == 0 ? 0 : hdr_len_by_opcode[opcode] - (12 + 8); } static u8 __get_16b_hdr_len(struct hfi1_16b_header *hdr) { struct ib_other_headers *ohdr = NULL; u8 opcode; u8 l4 = hfi1_16B_get_l4(hdr); if (l4 == OPA_16B_L4_FM) { opcode = IB_OPCODE_UD_SEND_ONLY; return (8 + 8); /* No BTH */ } if (l4 == OPA_16B_L4_IB_LOCAL) ohdr = &hdr->u.oth; else ohdr = &hdr->u.l.oth; opcode = ib_bth_get_opcode(ohdr); return hdr_len_by_opcode[opcode] == 0 ? 0 : hdr_len_by_opcode[opcode] - (12 + 8 + 8); } u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet) { if (packet->etype != RHF_RCV_TYPE_BYPASS) return __get_ib_hdr_len(packet->hdr); else return __get_16b_hdr_len(packet->hdr); } u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr) { if (!opa_hdr->hdr_type) return __get_ib_hdr_len(&opa_hdr->ibh); else return __get_16b_hdr_len(&opa_hdr->opah); } const char *hfi1_trace_get_packet_l4_str(u8 l4) { if (l4) return "16B"; else return "9B"; } const char *hfi1_trace_get_packet_l2_str(u8 l2) { switch (l2) { case 0: return "0"; case 1: return "1"; case 2: return "16B"; case 3: return "9B"; } return ""; } #define IMM_PRN "imm:%d" #define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x" #define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x" #define DETH_PRN "deth qkey:0x%.8x sqpn:0x%.6x" #define DETH_ENTROPY_PRN "deth qkey:0x%.8x sqpn:0x%.6x entropy:0x%.2x" #define IETH_PRN "ieth rkey:0x%.8x" #define ATOMICACKETH_PRN "origdata:%llx" #define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx" #define TID_RDMA_KDETH "kdeth0 0x%x kdeth1 0x%x" #define TID_RDMA_KDETH_DATA "kdeth0 0x%x: kver %u sh %u intr %u tidctrl %u tid %x offset %x kdeth1 0x%x: jkey %x" #define TID_READ_REQ_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x" #define TID_READ_RSP_PRN "verbs_qp 0x%x" #define TID_WRITE_REQ_PRN "original_qp 0x%x" #define TID_WRITE_RSP_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x" #define TID_WRITE_DATA_PRN "verbs_qp 0x%x" #define TID_ACK_PRN "tid_flow_psn 0x%x verbs_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x" #define TID_RESYNC_PRN "verbs_qp 0x%x" #define OP(transport, op) IB_OPCODE_## transport ## _ ## op static const char *parse_syndrome(u8 syndrome) { switch (syndrome >> 5) { case 0: return "ACK"; case 1: return "RNRNAK"; case 3: return "NAK"; } return ""; } void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr, u8 *ack, bool *becn, bool *fecn, u8 *mig, u8 *se, u8 *pad, u8 *opcode, u8 *tver, u16 *pkey, u32 *psn, u32 *qpn) { *ack = ib_bth_get_ackreq(ohdr); *becn = ib_bth_get_becn(ohdr); *fecn = ib_bth_get_fecn(ohdr); *mig = ib_bth_get_migreq(ohdr); *se = ib_bth_get_se(ohdr); *pad = ib_bth_get_pad(ohdr); *opcode = ib_bth_get_opcode(ohdr); *tver = ib_bth_get_tver(ohdr); *pkey = ib_bth_get_pkey(ohdr); *psn = mask_psn(ib_bth_get_psn(ohdr)); *qpn = ib_bth_get_qpn(ohdr); } void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr, u8 *ack, u8 *mig, u8 *opcode, u8 *pad, u8 *se, u8 *tver, u32 *psn, u32 *qpn) { *ack = ib_bth_get_ackreq(ohdr); *mig = ib_bth_get_migreq(ohdr); *opcode = ib_bth_get_opcode(ohdr); *pad = ib_bth_get_pad(ohdr); *se = ib_bth_get_se(ohdr); *tver = 
ib_bth_get_tver(ohdr); *psn = mask_psn(ib_bth_get_psn(ohdr)); *qpn = ib_bth_get_qpn(ohdr); } static u16 ib_get_len(const struct ib_header *hdr) { return be16_to_cpu(hdr->lrh[2]); } void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5, u8 *lnh, u8 *lver, u8 *sl, u8 *sc, u16 *len, u32 *dlid, u32 *slid) { *lnh = ib_get_lnh(hdr); *lver = ib_get_lver(hdr); *sl = ib_get_sl(hdr); *sc = ib_get_sc(hdr) | (sc5 << 4); *len = ib_get_len(hdr); *dlid = ib_get_dlid(hdr); *slid = ib_get_slid(hdr); } void hfi1_trace_parse_16b_hdr(struct hfi1_16b_header *hdr, u8 *age, bool *becn, bool *fecn, u8 *l4, u8 *rc, u8 *sc, u16 *entropy, u16 *len, u16 *pkey, u32 *dlid, u32 *slid) { *age = hfi1_16B_get_age(hdr); *becn = hfi1_16B_get_becn(hdr); *fecn = hfi1_16B_get_fecn(hdr); *l4 = hfi1_16B_get_l4(hdr); *rc = hfi1_16B_get_rc(hdr); *sc = hfi1_16B_get_sc(hdr); *entropy = hfi1_16B_get_entropy(hdr); *len = hfi1_16B_get_len(hdr); *pkey = hfi1_16B_get_pkey(hdr); *dlid = hfi1_16B_get_dlid(hdr); *slid = hfi1_16B_get_slid(hdr); } #define LRH_PRN "len:%d sc:%d dlid:0x%.4x slid:0x%.4x " #define LRH_9B_PRN "lnh:%d,%s lver:%d sl:%d" #define LRH_16B_PRN "age:%d becn:%d fecn:%d l4:%d " \ "rc:%d sc:%d pkey:0x%.4x entropy:0x%.4x" const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass, u8 age, bool becn, bool fecn, u8 l4, u8 lnh, const char *lnh_name, u8 lver, u8 rc, u8 sc, u8 sl, u16 entropy, u16 len, u16 pkey, u32 dlid, u32 slid) { const char *ret = trace_seq_buffer_ptr(p); trace_seq_printf(p, LRH_PRN, len, sc, dlid, slid); if (bypass) trace_seq_printf(p, LRH_16B_PRN, age, becn, fecn, l4, rc, sc, pkey, entropy); else trace_seq_printf(p, LRH_9B_PRN, lnh, lnh_name, lver, sl); trace_seq_putc(p, 0); return ret; } #define BTH_9B_PRN \ "op:0x%.2x,%s se:%d m:%d pad:%d tver:%d pkey:0x%.4x " \ "f:%d b:%d qpn:0x%.6x a:%d psn:0x%.8x" #define BTH_16B_PRN \ "op:0x%.2x,%s se:%d m:%d pad:%d tver:%d " \ "qpn:0x%.6x a:%d psn:0x%.8x" #define L4_FM_16B_PRN \ "op:0x%.2x,%s dest_qpn:0x%.6x src_qpn:0x%.6x" const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4, u8 ack, bool becn, bool fecn, u8 mig, u8 se, u8 pad, u8 opcode, const char *opname, u8 tver, u16 pkey, u32 psn, u32 qpn, u32 dest_qpn, u32 src_qpn) { const char *ret = trace_seq_buffer_ptr(p); if (bypass) if (l4 == OPA_16B_L4_FM) trace_seq_printf(p, L4_FM_16B_PRN, opcode, opname, dest_qpn, src_qpn); else trace_seq_printf(p, BTH_16B_PRN, opcode, opname, se, mig, pad, tver, qpn, ack, psn); else trace_seq_printf(p, BTH_9B_PRN, opcode, opname, se, mig, pad, tver, pkey, fecn, becn, qpn, ack, psn); trace_seq_putc(p, 0); return ret; } const char *parse_everbs_hdrs( struct trace_seq *p, u8 opcode, u8 l4, u32 dest_qpn, u32 src_qpn, void *ehdrs) { union ib_ehdrs *eh = ehdrs; const char *ret = trace_seq_buffer_ptr(p); if (l4 == OPA_16B_L4_FM) { trace_seq_printf(p, "mgmt pkt"); goto out; } switch (opcode) { /* imm */ case OP(RC, SEND_LAST_WITH_IMMEDIATE): case OP(UC, SEND_LAST_WITH_IMMEDIATE): case OP(RC, SEND_ONLY_WITH_IMMEDIATE): case OP(UC, SEND_ONLY_WITH_IMMEDIATE): case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE): case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE): trace_seq_printf(p, IMM_PRN, be32_to_cpu(eh->imm_data)); break; /* reth + imm */ case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE): case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE): trace_seq_printf(p, RETH_PRN " " IMM_PRN, get_ib_reth_vaddr(&eh->rc.reth), be32_to_cpu(eh->rc.reth.rkey), be32_to_cpu(eh->rc.reth.length), be32_to_cpu(eh->rc.imm_data)); break; /* reth */ case OP(RC, RDMA_READ_REQUEST): case OP(RC, RDMA_WRITE_FIRST): case 
OP(UC, RDMA_WRITE_FIRST): case OP(RC, RDMA_WRITE_ONLY): case OP(UC, RDMA_WRITE_ONLY): trace_seq_printf(p, RETH_PRN, get_ib_reth_vaddr(&eh->rc.reth), be32_to_cpu(eh->rc.reth.rkey), be32_to_cpu(eh->rc.reth.length)); break; case OP(RC, RDMA_READ_RESPONSE_FIRST): case OP(RC, RDMA_READ_RESPONSE_LAST): case OP(RC, RDMA_READ_RESPONSE_ONLY): case OP(RC, ACKNOWLEDGE): trace_seq_printf(p, AETH_PRN, be32_to_cpu(eh->aeth) >> 24, parse_syndrome(be32_to_cpu(eh->aeth) >> 24), be32_to_cpu(eh->aeth) & IB_MSN_MASK); break; case OP(TID_RDMA, WRITE_REQ): trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " " TID_WRITE_REQ_PRN, le32_to_cpu(eh->tid_rdma.w_req.kdeth0), le32_to_cpu(eh->tid_rdma.w_req.kdeth1), ib_u64_get(&eh->tid_rdma.w_req.reth.vaddr), be32_to_cpu(eh->tid_rdma.w_req.reth.rkey), be32_to_cpu(eh->tid_rdma.w_req.reth.length), be32_to_cpu(eh->tid_rdma.w_req.verbs_qp)); break; case OP(TID_RDMA, WRITE_RESP): trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " " TID_WRITE_RSP_PRN, le32_to_cpu(eh->tid_rdma.w_rsp.kdeth0), le32_to_cpu(eh->tid_rdma.w_rsp.kdeth1), be32_to_cpu(eh->tid_rdma.w_rsp.aeth) >> 24, parse_syndrome(/* aeth */ be32_to_cpu(eh->tid_rdma.w_rsp.aeth) >> 24), (be32_to_cpu(eh->tid_rdma.w_rsp.aeth) & IB_MSN_MASK), be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_psn), be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_qp), be32_to_cpu(eh->tid_rdma.w_rsp.verbs_qp)); break; case OP(TID_RDMA, WRITE_DATA_LAST): case OP(TID_RDMA, WRITE_DATA): trace_seq_printf(p, TID_RDMA_KDETH_DATA " " TID_WRITE_DATA_PRN, le32_to_cpu(eh->tid_rdma.w_data.kdeth0), KDETH_GET(eh->tid_rdma.w_data.kdeth0, KVER), KDETH_GET(eh->tid_rdma.w_data.kdeth0, SH), KDETH_GET(eh->tid_rdma.w_data.kdeth0, INTR), KDETH_GET(eh->tid_rdma.w_data.kdeth0, TIDCTRL), KDETH_GET(eh->tid_rdma.w_data.kdeth0, TID), KDETH_GET(eh->tid_rdma.w_data.kdeth0, OFFSET), le32_to_cpu(eh->tid_rdma.w_data.kdeth1), KDETH_GET(eh->tid_rdma.w_data.kdeth1, JKEY), be32_to_cpu(eh->tid_rdma.w_data.verbs_qp)); break; case OP(TID_RDMA, READ_REQ): trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " " TID_READ_REQ_PRN, le32_to_cpu(eh->tid_rdma.r_req.kdeth0), le32_to_cpu(eh->tid_rdma.r_req.kdeth1), ib_u64_get(&eh->tid_rdma.r_req.reth.vaddr), be32_to_cpu(eh->tid_rdma.r_req.reth.rkey), be32_to_cpu(eh->tid_rdma.r_req.reth.length), be32_to_cpu(eh->tid_rdma.r_req.tid_flow_psn), be32_to_cpu(eh->tid_rdma.r_req.tid_flow_qp), be32_to_cpu(eh->tid_rdma.r_req.verbs_qp)); break; case OP(TID_RDMA, READ_RESP): trace_seq_printf(p, TID_RDMA_KDETH_DATA " " AETH_PRN " " TID_READ_RSP_PRN, le32_to_cpu(eh->tid_rdma.r_rsp.kdeth0), KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, KVER), KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, SH), KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, INTR), KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TIDCTRL), KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TID), KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, OFFSET), le32_to_cpu(eh->tid_rdma.r_rsp.kdeth1), KDETH_GET(eh->tid_rdma.r_rsp.kdeth1, JKEY), be32_to_cpu(eh->tid_rdma.r_rsp.aeth) >> 24, parse_syndrome(/* aeth */ be32_to_cpu(eh->tid_rdma.r_rsp.aeth) >> 24), (be32_to_cpu(eh->tid_rdma.r_rsp.aeth) & IB_MSN_MASK), be32_to_cpu(eh->tid_rdma.r_rsp.verbs_qp)); break; case OP(TID_RDMA, ACK): trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " " TID_ACK_PRN, le32_to_cpu(eh->tid_rdma.ack.kdeth0), le32_to_cpu(eh->tid_rdma.ack.kdeth1), be32_to_cpu(eh->tid_rdma.ack.aeth) >> 24, parse_syndrome(/* aeth */ be32_to_cpu(eh->tid_rdma.ack.aeth) >> 24), (be32_to_cpu(eh->tid_rdma.ack.aeth) & IB_MSN_MASK), be32_to_cpu(eh->tid_rdma.ack.tid_flow_psn), be32_to_cpu(eh->tid_rdma.ack.verbs_psn), 
be32_to_cpu(eh->tid_rdma.ack.tid_flow_qp), be32_to_cpu(eh->tid_rdma.ack.verbs_qp)); break; case OP(TID_RDMA, RESYNC): trace_seq_printf(p, TID_RDMA_KDETH " " TID_RESYNC_PRN, le32_to_cpu(eh->tid_rdma.resync.kdeth0), le32_to_cpu(eh->tid_rdma.resync.kdeth1), be32_to_cpu(eh->tid_rdma.resync.verbs_qp)); break; /* aeth + atomicacketh */ case OP(RC, ATOMIC_ACKNOWLEDGE): trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN, be32_to_cpu(eh->at.aeth) >> 24, parse_syndrome(be32_to_cpu(eh->at.aeth) >> 24), be32_to_cpu(eh->at.aeth) & IB_MSN_MASK, ib_u64_get(&eh->at.atomic_ack_eth)); break; /* atomiceth */ case OP(RC, COMPARE_SWAP): case OP(RC, FETCH_ADD): trace_seq_printf(p, ATOMICETH_PRN, get_ib_ateth_vaddr(&eh->atomic_eth), eh->atomic_eth.rkey, get_ib_ateth_swap(&eh->atomic_eth), get_ib_ateth_compare(&eh->atomic_eth)); break; /* deth */ case OP(UD, SEND_ONLY): trace_seq_printf(p, DETH_ENTROPY_PRN, be32_to_cpu(eh->ud.deth[0]), be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK, be32_to_cpu(eh->ud.deth[1]) >> HFI1_IPOIB_ENTROPY_SHIFT); break; case OP(UD, SEND_ONLY_WITH_IMMEDIATE): trace_seq_printf(p, DETH_PRN, be32_to_cpu(eh->ud.deth[0]), be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK); break; /* ieth */ case OP(RC, SEND_LAST_WITH_INVALIDATE): case OP(RC, SEND_ONLY_WITH_INVALIDATE): trace_seq_printf(p, IETH_PRN, be32_to_cpu(eh->ieth)); break; } out: trace_seq_putc(p, 0); return ret; } const char *parse_sdma_flags( struct trace_seq *p, u64 desc0, u64 desc1) { const char *ret = trace_seq_buffer_ptr(p); char flags[5] = { 'x', 'x', 'x', 'x', 0 }; flags[0] = (desc1 & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; flags[1] = (desc1 & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-'; flags[2] = (desc0 & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; flags[3] = (desc0 & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-'; trace_seq_printf(p, "%s", flags); if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG) trace_seq_printf(p, " amode:%u aidx:%u alen:%u", (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) & SDMA_DESC1_HEADER_MODE_MASK), (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) & SDMA_DESC1_HEADER_INDEX_MASK), (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) & SDMA_DESC1_HEADER_DWS_MASK)); return ret; } const char *print_u32_array( struct trace_seq *p, u32 *arr, int len) { int i; const char *ret = trace_seq_buffer_ptr(p); for (i = 0; i < len ; i++) trace_seq_printf(p, "%s%#x", i == 0 ? 
"" : " ", arr[i]); trace_seq_putc(p, 0); return ret; } u8 hfi1_trace_get_tid_ctrl(u32 ent) { return EXP_TID_GET(ent, CTRL); } u16 hfi1_trace_get_tid_len(u32 ent) { return EXP_TID_GET(ent, LEN); } u16 hfi1_trace_get_tid_idx(u32 ent) { return EXP_TID_GET(ent, IDX); } struct hfi1_ctxt_hist { atomic_t count; atomic_t data[255]; }; static struct hfi1_ctxt_hist hist = { .count = ATOMIC_INIT(0) }; const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt) { int i, len = ARRAY_SIZE(hist.data); const char *ret = trace_seq_buffer_ptr(p); unsigned long packet_count = atomic_fetch_inc(&hist.count); trace_seq_printf(p, "packet[%lu]", packet_count); for (i = 0; i < len; ++i) { unsigned long val; atomic_t *count = &hist.data[i]; if (ctxt == i) val = atomic_fetch_inc(count); else val = atomic_read(count); if (val) trace_seq_printf(p, "(%d:%lu)", i, val); } trace_seq_putc(p, 0); return ret; } __hfi1_trace_fn(AFFINITY); __hfi1_trace_fn(PKT); __hfi1_trace_fn(PROC); __hfi1_trace_fn(SDMA); __hfi1_trace_fn(LINKVERB); __hfi1_trace_fn(DEBUG); __hfi1_trace_fn(SNOOP); __hfi1_trace_fn(CNTR); __hfi1_trace_fn(PIO); __hfi1_trace_fn(DC8051); __hfi1_trace_fn(FIRMWARE); __hfi1_trace_fn(RCVCTRL); __hfi1_trace_fn(TID); __hfi1_trace_fn(MMU); __hfi1_trace_fn(IOCTL);
linux-master
drivers/infiniband/hw/hfi1/trace.c
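Aside (not part of the dataset row above): the trace.c helpers build human-readable strings by mapping descriptor bits to single characters, as parse_sdma_flags() does for the SDMA descriptor words. A minimal user-space C sketch of that technique follows; the DEMO_* bit positions are placeholders, not the real SDMA_DESC0/SDMA_DESC1 flag values.

/*
 * Illustrative sketch only: decode two descriptor words into a 4-char
 * flag string, '-' when a bit is clear, mirroring parse_sdma_flags().
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_INT_REQ	(1ULL << 0)	/* placeholder for SDMA_DESC1_INT_REQ_FLAG */
#define DEMO_HOST	(1ULL << 1)	/* placeholder for SDMA_DESC1_HEAD_TO_HOST_FLAG */
#define DEMO_FIRST	(1ULL << 2)	/* placeholder for SDMA_DESC0_FIRST_DESC_FLAG */
#define DEMO_LAST	(1ULL << 3)	/* placeholder for SDMA_DESC0_LAST_DESC_FLAG */

static void demo_flags(uint64_t desc0, uint64_t desc1, char out[5])
{
	out[0] = (desc1 & DEMO_INT_REQ) ? 'I' : '-';
	out[1] = (desc1 & DEMO_HOST) ? 'H' : '-';
	out[2] = (desc0 & DEMO_FIRST) ? 'F' : '-';
	out[3] = (desc0 & DEMO_LAST) ? 'L' : '-';
	out[4] = '\0';
}

int main(void)
{
	char buf[5];

	demo_flags(DEMO_FIRST | DEMO_LAST, DEMO_INT_REQ, buf);
	printf("%s\n", buf);	/* prints "I-FL" */
	return 0;
}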
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright(c) 2018 Intel Corporation. * */ #include "hfi.h" #include "trace.h" #include "qp.h" #include "opfn.h" #define IB_BTHE_E BIT(IB_BTHE_E_SHIFT) #define OPFN_CODE(code) BIT((code) - 1) #define OPFN_MASK(code) OPFN_CODE(STL_VERBS_EXTD_##code) struct hfi1_opfn_type { bool (*request)(struct rvt_qp *qp, u64 *data); bool (*response)(struct rvt_qp *qp, u64 *data); bool (*reply)(struct rvt_qp *qp, u64 data); void (*error)(struct rvt_qp *qp); }; static struct hfi1_opfn_type hfi1_opfn_handlers[STL_VERBS_EXTD_MAX] = { [STL_VERBS_EXTD_TID_RDMA] = { .request = tid_rdma_conn_req, .response = tid_rdma_conn_resp, .reply = tid_rdma_conn_reply, .error = tid_rdma_conn_error, }, }; static struct workqueue_struct *opfn_wq; static void opfn_schedule_conn_request(struct rvt_qp *qp); static bool hfi1_opfn_extended(u32 bth1) { return !!(bth1 & IB_BTHE_E); } static void opfn_conn_request(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct ib_atomic_wr wr; u16 mask, capcode; struct hfi1_opfn_type *extd; u64 data; unsigned long flags; int ret = 0; trace_hfi1_opfn_state_conn_request(qp); spin_lock_irqsave(&priv->opfn.lock, flags); /* * Exit if the extended bit is not set, or if nothing is requested, or * if we have completed all requests, or if a previous request is in * progress */ if (!priv->opfn.extended || !priv->opfn.requested || priv->opfn.requested == priv->opfn.completed || priv->opfn.curr) goto done; mask = priv->opfn.requested & ~priv->opfn.completed; capcode = ilog2(mask & ~(mask - 1)) + 1; if (capcode >= STL_VERBS_EXTD_MAX) { priv->opfn.completed |= OPFN_CODE(capcode); goto done; } extd = &hfi1_opfn_handlers[capcode]; if (!extd || !extd->request || !extd->request(qp, &data)) { /* * Either there is no handler for this capability or the request * packet could not be generated. Either way, mark it as done so * we don't keep attempting to complete it. 
*/ priv->opfn.completed |= OPFN_CODE(capcode); goto done; } trace_hfi1_opfn_data_conn_request(qp, capcode, data); data = (data & ~0xf) | capcode; memset(&wr, 0, sizeof(wr)); wr.wr.opcode = IB_WR_OPFN; wr.remote_addr = HFI1_VERBS_E_ATOMIC_VADDR; wr.compare_add = data; priv->opfn.curr = capcode; /* A new request is now in progress */ /* Drop opfn.lock before calling ib_post_send() */ spin_unlock_irqrestore(&priv->opfn.lock, flags); ret = ib_post_send(&qp->ibqp, &wr.wr, NULL); if (ret) goto err; trace_hfi1_opfn_state_conn_request(qp); return; err: trace_hfi1_msg_opfn_conn_request(qp, "ib_ost_send failed: ret = ", (u64)ret); spin_lock_irqsave(&priv->opfn.lock, flags); /* * In case of an unexpected error return from ib_post_send * clear opfn.curr and reschedule to try again */ priv->opfn.curr = STL_VERBS_EXTD_NONE; opfn_schedule_conn_request(qp); done: spin_unlock_irqrestore(&priv->opfn.lock, flags); } void opfn_send_conn_request(struct work_struct *work) { struct hfi1_opfn_data *od; struct hfi1_qp_priv *qpriv; od = container_of(work, struct hfi1_opfn_data, opfn_work); qpriv = container_of(od, struct hfi1_qp_priv, opfn); opfn_conn_request(qpriv->owner); } /* * When QP s_lock is held in the caller, the OPFN request must be scheduled * to a different workqueue to avoid double locking QP s_lock in call to * ib_post_send in opfn_conn_request */ static void opfn_schedule_conn_request(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; trace_hfi1_opfn_state_sched_conn_request(qp); queue_work(opfn_wq, &priv->opfn.opfn_work); } void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e, struct ib_atomic_eth *ateth) { struct hfi1_qp_priv *priv = qp->priv; u64 data = be64_to_cpu(ateth->compare_data); struct hfi1_opfn_type *extd; u8 capcode; unsigned long flags; trace_hfi1_opfn_state_conn_response(qp); capcode = data & 0xf; trace_hfi1_opfn_data_conn_response(qp, capcode, data); if (!capcode || capcode >= STL_VERBS_EXTD_MAX) return; extd = &hfi1_opfn_handlers[capcode]; if (!extd || !extd->response) { e->atomic_data = capcode; return; } spin_lock_irqsave(&priv->opfn.lock, flags); if (priv->opfn.completed & OPFN_CODE(capcode)) { /* * We are receiving a request for a feature that has already * been negotiated. 
This may mean that the other side has reset */ priv->opfn.completed &= ~OPFN_CODE(capcode); if (extd->error) extd->error(qp); } if (extd->response(qp, &data)) priv->opfn.completed |= OPFN_CODE(capcode); e->atomic_data = (data & ~0xf) | capcode; trace_hfi1_opfn_state_conn_response(qp); spin_unlock_irqrestore(&priv->opfn.lock, flags); } void opfn_conn_reply(struct rvt_qp *qp, u64 data) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_opfn_type *extd; u8 capcode; unsigned long flags; trace_hfi1_opfn_state_conn_reply(qp); capcode = data & 0xf; trace_hfi1_opfn_data_conn_reply(qp, capcode, data); if (!capcode || capcode >= STL_VERBS_EXTD_MAX) return; spin_lock_irqsave(&priv->opfn.lock, flags); /* * Either there is no previous request or the reply is not for the * current request */ if (!priv->opfn.curr || capcode != priv->opfn.curr) goto done; extd = &hfi1_opfn_handlers[capcode]; if (!extd || !extd->reply) goto clear; if (extd->reply(qp, data)) priv->opfn.completed |= OPFN_CODE(capcode); clear: /* * Clear opfn.curr to indicate that the previous request is no longer in * progress */ priv->opfn.curr = STL_VERBS_EXTD_NONE; trace_hfi1_opfn_state_conn_reply(qp); done: spin_unlock_irqrestore(&priv->opfn.lock, flags); } void opfn_conn_error(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_opfn_type *extd = NULL; unsigned long flags; u16 capcode; trace_hfi1_opfn_state_conn_error(qp); trace_hfi1_msg_opfn_conn_error(qp, "error. qp state ", (u64)qp->state); /* * The QP has gone into the Error state. We have to invalidate all * negotiated feature, including the one in progress (if any). The RC * QP handling will clean the WQE for the connection request. */ spin_lock_irqsave(&priv->opfn.lock, flags); while (priv->opfn.completed) { capcode = priv->opfn.completed & ~(priv->opfn.completed - 1); extd = &hfi1_opfn_handlers[ilog2(capcode) + 1]; if (extd->error) extd->error(qp); priv->opfn.completed &= ~OPFN_CODE(capcode); } priv->opfn.extended = 0; priv->opfn.requested = 0; priv->opfn.curr = STL_VERBS_EXTD_NONE; spin_unlock_irqrestore(&priv->opfn.lock, flags); } void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask) { struct ib_qp *ibqp = &qp->ibqp; struct hfi1_qp_priv *priv = qp->priv; unsigned long flags; if (attr_mask & IB_QP_RETRY_CNT) priv->s_retry = attr->retry_cnt; spin_lock_irqsave(&priv->opfn.lock, flags); if (ibqp->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) { struct tid_rdma_params *local = &priv->tid_rdma.local; if (attr_mask & IB_QP_TIMEOUT) priv->tid_retry_timeout_jiffies = qp->timeout_jiffies; if (qp->pmtu == enum_to_mtu(OPA_MTU_4096) || qp->pmtu == enum_to_mtu(OPA_MTU_8192)) { tid_rdma_opfn_init(qp, local); /* * We only want to set the OPFN requested bit when the * QP transitions to RTS. */ if (attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTS) { priv->opfn.requested |= OPFN_MASK(TID_RDMA); /* * If the QP is transitioning to RTS and the * opfn.completed for TID RDMA has already been * set, the QP is being moved *back* into RTS. * We can now renegotiate the TID RDMA * parameters. */ if (priv->opfn.completed & OPFN_MASK(TID_RDMA)) { priv->opfn.completed &= ~OPFN_MASK(TID_RDMA); /* * Since the opfn.completed bit was * already set, it is safe to assume * that the opfn.extended is also set. 
*/ opfn_schedule_conn_request(qp); } } } else { memset(local, 0, sizeof(*local)); } } spin_unlock_irqrestore(&priv->opfn.lock, flags); } void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1) { struct hfi1_qp_priv *priv = qp->priv; if (!priv->opfn.extended && hfi1_opfn_extended(bth1) && HFI1_CAP_IS_KSET(OPFN)) { priv->opfn.extended = 1; if (qp->state == IB_QPS_RTS) opfn_conn_request(qp); } } int opfn_init(void) { opfn_wq = alloc_workqueue("hfi_opfn", WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES); if (!opfn_wq) return -ENOMEM; return 0; } void opfn_exit(void) { if (opfn_wq) { destroy_workqueue(opfn_wq); opfn_wq = NULL; } }
linux-master
drivers/infiniband/hw/hfi1/opfn.c
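Aside (not part of the dataset row above): opfn_conn_request() picks the next capability to negotiate with mask & ~(mask - 1), which isolates the lowest outstanding request bit, then ilog2(...) + 1 converts that bit back to a 1-based capability code, undoing OPFN_CODE(code) = BIT(code - 1). A small user-space C sketch of that arithmetic, with a local stand-in for the kernel's ilog2():

/*
 * Illustrative sketch only: select the lowest requested-but-not-completed
 * capability code, as opfn_conn_request() does with its requested/completed
 * bit masks.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int demo_ilog2(uint16_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int next_capcode(uint16_t requested, uint16_t completed)
{
	uint16_t mask = requested & ~completed;

	if (!mask)
		return 0;	/* nothing left to negotiate */
	return demo_ilog2(mask & ~(mask - 1)) + 1;
}

int main(void)
{
	/* capabilities 1 and 3 requested, capability 1 already completed */
	printf("%u\n", next_capcode(0x5, 0x1));	/* prints 3 */
	return 0;
}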
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015-2018 Intel Corporation. */ #include <linux/net.h> #include <rdma/opa_addr.h> #define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \ / (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16))) #include "hfi.h" #include "mad.h" #include "trace.h" #include "qp.h" #include "vnic.h" /* the reset value from the FM is supposed to be 0xffff, handle both */ #define OPA_LINK_WIDTH_RESET_OLD 0x0fff #define OPA_LINK_WIDTH_RESET 0xffff struct trap_node { struct list_head list; struct opa_mad_notice_attr data; __be64 tid; int len; u32 retry; u8 in_use; u8 repress; }; static int smp_length_check(u32 data_size, u32 request_len) { if (unlikely(request_len < data_size)) return -EINVAL; return 0; } static int reply(struct ib_mad_hdr *smp) { /* * The verbs framework will handle the directed/LID route * packet changes. */ smp->method = IB_MGMT_METHOD_GET_RESP; if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) smp->status |= IB_SMP_DIRECTION; return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } static inline void clear_opa_smp_data(struct opa_smp *smp) { void *data = opa_get_smp_data(smp); size_t size = opa_get_smp_data_size(smp); memset(data, 0, size); } static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); if (pkey_idx < ARRAY_SIZE(ppd->pkeys)) return ppd->pkeys[pkey_idx]; return 0; } void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port) { struct ib_event event; event.event = IB_EVENT_PKEY_CHANGE; event.device = &dd->verbs_dev.rdi.ibdev; event.element.port_num = port; ib_dispatch_event(&event); } /* * If the port is down, clean up all pending traps. We need to be careful * with the given trap, because it may be queued. */ static void cleanup_traps(struct hfi1_ibport *ibp, struct trap_node *trap) { struct trap_node *node, *q; unsigned long flags; struct list_head trap_list; int i; for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) { spin_lock_irqsave(&ibp->rvp.lock, flags); list_replace_init(&ibp->rvp.trap_lists[i].list, &trap_list); ibp->rvp.trap_lists[i].list_len = 0; spin_unlock_irqrestore(&ibp->rvp.lock, flags); /* * Remove all items from the list, freeing all the non-given * traps. */ list_for_each_entry_safe(node, q, &trap_list, list) { list_del(&node->list); if (node != trap) kfree(node); } } /* * If this wasn't on one of the lists it would not be freed. If it * was on the list, it is now safe to free. */ kfree(trap); } static struct trap_node *check_and_add_trap(struct hfi1_ibport *ibp, struct trap_node *trap) { struct trap_node *node; struct trap_list *trap_list; unsigned long flags; unsigned long timeout; int found = 0; unsigned int queue_id; static int trap_count; queue_id = trap->data.generic_type & 0x0F; if (queue_id >= RVT_MAX_TRAP_LISTS) { trap_count++; pr_err_ratelimited("hfi1: Invalid trap 0x%0x dropped. Total dropped: %d\n", trap->data.generic_type, trap_count); kfree(trap); return NULL; } /* * Since the retry (handle timeout) does not remove a trap request * from the list, all we have to do is compare the node. */ spin_lock_irqsave(&ibp->rvp.lock, flags); trap_list = &ibp->rvp.trap_lists[queue_id]; list_for_each_entry(node, &trap_list->list, list) { if (node == trap) { node->retry++; found = 1; break; } } /* If it is not on the list, add it, limited to RVT-MAX_TRAP_LEN. 
*/ if (!found) { if (trap_list->list_len < RVT_MAX_TRAP_LEN) { trap_list->list_len++; list_add_tail(&trap->list, &trap_list->list); } else { pr_warn_ratelimited("hfi1: Maximum trap limit reached for 0x%0x traps\n", trap->data.generic_type); kfree(trap); } } /* * Next check to see if there is a timer pending. If not, set it up * and get the first trap from the list. */ node = NULL; if (!timer_pending(&ibp->rvp.trap_timer)) { /* * o14-2 * If the time out is set we have to wait until it expires * before the trap can be sent. * This should be > RVT_TRAP_TIMEOUT */ timeout = (RVT_TRAP_TIMEOUT * (1UL << ibp->rvp.subnet_timeout)) / 1000; mod_timer(&ibp->rvp.trap_timer, jiffies + usecs_to_jiffies(timeout)); node = list_first_entry(&trap_list->list, struct trap_node, list); node->in_use = 1; } spin_unlock_irqrestore(&ibp->rvp.lock, flags); return node; } static void subn_handle_opa_trap_repress(struct hfi1_ibport *ibp, struct opa_smp *smp) { struct trap_list *trap_list; struct trap_node *trap; unsigned long flags; int i; if (smp->attr_id != IB_SMP_ATTR_NOTICE) return; spin_lock_irqsave(&ibp->rvp.lock, flags); for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) { trap_list = &ibp->rvp.trap_lists[i]; trap = list_first_entry_or_null(&trap_list->list, struct trap_node, list); if (trap && trap->tid == smp->tid) { if (trap->in_use) { trap->repress = 1; } else { trap_list->list_len--; list_del(&trap->list); kfree(trap); } break; } } spin_unlock_irqrestore(&ibp->rvp.lock, flags); } static void hfi1_update_sm_ah_attr(struct hfi1_ibport *ibp, struct rdma_ah_attr *attr, u32 dlid) { rdma_ah_set_dlid(attr, dlid); rdma_ah_set_port_num(attr, ppd_from_ibp(ibp)->port); if (dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); rdma_ah_set_ah_flags(attr, IB_AH_GRH); grh->sgid_index = 0; grh->hop_limit = 1; grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix; grh->dgid.global.interface_id = OPA_MAKE_ID(dlid); } } static int hfi1_modify_qp0_ah(struct hfi1_ibport *ibp, struct rvt_ah *ah, u32 dlid) { struct rdma_ah_attr attr; struct rvt_qp *qp0; int ret = -EINVAL; memset(&attr, 0, sizeof(attr)); attr.type = ah->ibah.type; hfi1_update_sm_ah_attr(ibp, &attr, dlid); rcu_read_lock(); qp0 = rcu_dereference(ibp->rvp.qp[0]); if (qp0) ret = rdma_modify_ah(&ah->ibah, &attr); rcu_read_unlock(); return ret; } static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid) { struct rdma_ah_attr attr; struct ib_ah *ah = ERR_PTR(-EINVAL); struct rvt_qp *qp0; struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_devdata *dd = dd_from_ppd(ppd); u32 port_num = ppd->port; memset(&attr, 0, sizeof(attr)); attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num); hfi1_update_sm_ah_attr(ibp, &attr, dlid); rcu_read_lock(); qp0 = rcu_dereference(ibp->rvp.qp[0]); if (qp0) ah = rdma_create_ah(qp0->ibqp.pd, &attr, 0); rcu_read_unlock(); return ah; } static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap) { struct ib_mad_send_buf *send_buf; struct ib_mad_agent *agent; struct opa_smp *smp; unsigned long flags; int pkey_idx; u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp; agent = ibp->rvp.send_agent; if (!agent) { cleanup_traps(ibp, trap); return; } /* o14-3.2.1 */ if (driver_lstate(ppd_from_ibp(ibp)) != IB_PORT_ACTIVE) { cleanup_traps(ibp, trap); return; } /* Add the trap to the list if necessary and see if we can send it */ trap = check_and_add_trap(ibp, trap); if (!trap) return; pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY); if (pkey_idx < 0) { pr_warn("%s: failed to 
find limited mgmt pkey, defaulting 0x%x\n", __func__, hfi1_get_pkey(ibp, 1)); pkey_idx = 1; } send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) return; smp = send_buf->mad; smp->base_version = OPA_MGMT_BASE_VERSION; smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; smp->class_version = OPA_SM_CLASS_VERSION; smp->method = IB_MGMT_METHOD_TRAP; /* Only update the transaction ID for new traps (o13-5). */ if (trap->tid == 0) { ibp->rvp.tid++; /* make sure that tid != 0 */ if (ibp->rvp.tid == 0) ibp->rvp.tid++; trap->tid = cpu_to_be64(ibp->rvp.tid); } smp->tid = trap->tid; smp->attr_id = IB_SMP_ATTR_NOTICE; /* o14-1: smp->mkey = 0; */ memcpy(smp->route.lid.data, &trap->data, trap->len); spin_lock_irqsave(&ibp->rvp.lock, flags); if (!ibp->rvp.sm_ah) { if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { struct ib_ah *ah; ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid); if (IS_ERR(ah)) { spin_unlock_irqrestore(&ibp->rvp.lock, flags); return; } send_buf->ah = ah; ibp->rvp.sm_ah = ibah_to_rvtah(ah); } else { spin_unlock_irqrestore(&ibp->rvp.lock, flags); return; } } else { send_buf->ah = &ibp->rvp.sm_ah->ibah; } /* * If the trap was repressed while things were getting set up, don't * bother sending it. This could happen for a retry. */ if (trap->repress) { list_del(&trap->list); spin_unlock_irqrestore(&ibp->rvp.lock, flags); kfree(trap); ib_free_send_mad(send_buf); return; } trap->in_use = 0; spin_unlock_irqrestore(&ibp->rvp.lock, flags); if (ib_post_send_mad(send_buf, NULL)) ib_free_send_mad(send_buf); } void hfi1_handle_trap_timer(struct timer_list *t) { struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer); struct trap_node *trap = NULL; unsigned long flags; int i; /* Find the trap with the highest priority */ spin_lock_irqsave(&ibp->rvp.lock, flags); for (i = 0; !trap && i < RVT_MAX_TRAP_LISTS; i++) { trap = list_first_entry_or_null(&ibp->rvp.trap_lists[i].list, struct trap_node, list); } spin_unlock_irqrestore(&ibp->rvp.lock, flags); if (trap) send_trap(ibp, trap); } static struct trap_node *create_trap_node(u8 type, __be16 trap_num, u32 lid) { struct trap_node *trap; trap = kzalloc(sizeof(*trap), GFP_ATOMIC); if (!trap) return NULL; INIT_LIST_HEAD(&trap->list); trap->data.generic_type = type; trap->data.prod_type_lsb = IB_NOTICE_PROD_CA; trap->data.trap_num = trap_num; trap->data.issuer_lid = cpu_to_be32(lid); return trap; } /* * Send a bad P_Key trap (ch. 14.3.8). */ void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl, u32 qp1, u32 qp2, u32 lid1, u32 lid2) { struct trap_node *trap; u32 lid = ppd_from_ibp(ibp)->lid; ibp->rvp.n_pkt_drops++; ibp->rvp.pkey_violations++; trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_P_KEY, lid); if (!trap) return; /* Send violation trap */ trap->data.ntc_257_258.lid1 = cpu_to_be32(lid1); trap->data.ntc_257_258.lid2 = cpu_to_be32(lid2); trap->data.ntc_257_258.key = cpu_to_be32(key); trap->data.ntc_257_258.sl = sl << 3; trap->data.ntc_257_258.qp1 = cpu_to_be32(qp1); trap->data.ntc_257_258.qp2 = cpu_to_be32(qp2); trap->len = sizeof(trap->data); send_trap(ibp, trap); } /* * Send a bad M_Key trap (ch. 14.3.9). 
*/ static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad, __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt) { struct trap_node *trap; u32 lid = ppd_from_ibp(ibp)->lid; trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_M_KEY, lid); if (!trap) return; /* Send violation trap */ trap->data.ntc_256.lid = trap->data.issuer_lid; trap->data.ntc_256.method = mad->method; trap->data.ntc_256.attr_id = mad->attr_id; trap->data.ntc_256.attr_mod = mad->attr_mod; trap->data.ntc_256.mkey = mkey; if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { trap->data.ntc_256.dr_slid = dr_slid; trap->data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE; if (hop_cnt > ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path)) { trap->data.ntc_256.dr_trunc_hop |= IB_NOTICE_TRAP_DR_TRUNC; hop_cnt = ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path); } trap->data.ntc_256.dr_trunc_hop |= hop_cnt; memcpy(trap->data.ntc_256.dr_rtn_path, return_path, hop_cnt); } trap->len = sizeof(trap->data); send_trap(ibp, trap); } /* * Send a Port Capability Mask Changed trap (ch. 14.3.11). */ void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num) { struct trap_node *trap; struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); struct hfi1_devdata *dd = dd_from_dev(verbs_dev); struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data; u32 lid = ppd_from_ibp(ibp)->lid; trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_CAPABILITY, lid); if (!trap) return; trap->data.ntc_144.lid = trap->data.issuer_lid; trap->data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags); trap->data.ntc_144.cap_mask3 = cpu_to_be16(ibp->rvp.port_cap3_flags); trap->len = sizeof(trap->data); send_trap(ibp, trap); } /* * Send a System Image GUID Changed trap (ch. 14.3.12). */ void hfi1_sys_guid_chg(struct hfi1_ibport *ibp) { struct trap_node *trap; u32 lid = ppd_from_ibp(ibp)->lid; trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_SYSGUID, lid); if (!trap) return; trap->data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid; trap->data.ntc_145.lid = trap->data.issuer_lid; trap->len = sizeof(trap->data); send_trap(ibp, trap); } /* * Send a Node Description Changed trap (ch. 14.3.13). 
*/ void hfi1_node_desc_chg(struct hfi1_ibport *ibp) { struct trap_node *trap; u32 lid = ppd_from_ibp(ibp)->lid; trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_CAPABILITY, lid); if (!trap) return; trap->data.ntc_144.lid = trap->data.issuer_lid; trap->data.ntc_144.change_flags = cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG); trap->len = sizeof(trap->data); send_trap(ibp, trap); } static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct opa_node_description *nd; if (am || smp_length_check(sizeof(*nd), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } nd = (struct opa_node_description *)data; memcpy(nd->data, ibdev->node_desc, sizeof(nd->data)); if (resp_len) *resp_len += sizeof(*nd); return reply((struct ib_mad_hdr *)smp); } static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct opa_node_info *ni; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 pidx = port - 1; /* IB number port from 1, hw from 0 */ ni = (struct opa_node_info *)data; /* GUID 0 is illegal */ if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 || smp_length_check(sizeof(*ni), max_len) || get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX); ni->base_version = OPA_MGMT_BASE_VERSION; ni->class_version = OPA_SM_CLASS_VERSION; ni->node_type = 1; /* channel adapter */ ni->num_ports = ibdev->phys_port_cnt; /* This is already in network order */ ni->system_image_guid = ib_hfi1_sys_image_guid; ni->node_guid = ibdev->node_guid; ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd)); ni->device_id = cpu_to_be16(dd->pcidev->device); ni->revision = cpu_to_be32(dd->minrev); ni->local_port_num = port; ni->vendor_id[0] = dd->oui1; ni->vendor_id[1] = dd->oui2; ni->vendor_id[2] = dd->oui3; if (resp_len) *resp_len += sizeof(*ni); return reply((struct ib_mad_hdr *)smp); } static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, u32 port) { struct ib_node_info *nip = (struct ib_node_info *)&smp->data; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 pidx = port - 1; /* IB number port from 1, hw from 0 */ /* GUID 0 is illegal */ if (smp->attr_mod || pidx >= dd->num_pports || ibdev->node_guid == 0 || get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX); nip->base_version = OPA_MGMT_BASE_VERSION; nip->class_version = OPA_SM_CLASS_VERSION; nip->node_type = 1; /* channel adapter */ nip->num_ports = ibdev->phys_port_cnt; /* This is already in network order */ nip->sys_guid = ib_hfi1_sys_image_guid; nip->node_guid = ibdev->node_guid; nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd)); nip->device_id = cpu_to_be16(dd->pcidev->device); nip->revision = cpu_to_be32(dd->minrev); nip->local_port_num = port; nip->vendor_id[0] = dd->oui1; nip->vendor_id[1] = dd->oui2; nip->vendor_id[2] = dd->oui3; return reply((struct ib_mad_hdr *)smp); } static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w) { (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w); } static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w) { (void)hfi1_set_ib_cfg(ppd, 
HFI1_IB_CFG_LWID_DG_ENB, w); } static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s) { (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s); } static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad, int mad_flags, __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt) { int valid_mkey = 0; int ret = 0; /* Is the mkey in the process of expiring? */ if (ibp->rvp.mkey_lease_timeout && time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) { /* Clear timeout and mkey protection field. */ ibp->rvp.mkey_lease_timeout = 0; ibp->rvp.mkeyprot = 0; } if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 || ibp->rvp.mkey == mkey) valid_mkey = 1; /* Unset lease timeout on any valid Get/Set/TrapRepress */ if (valid_mkey && ibp->rvp.mkey_lease_timeout && (mad->method == IB_MGMT_METHOD_GET || mad->method == IB_MGMT_METHOD_SET || mad->method == IB_MGMT_METHOD_TRAP_REPRESS)) ibp->rvp.mkey_lease_timeout = 0; if (!valid_mkey) { switch (mad->method) { case IB_MGMT_METHOD_GET: /* Bad mkey not a violation below level 2 */ if (ibp->rvp.mkeyprot < 2) break; fallthrough; case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_TRAP_REPRESS: if (ibp->rvp.mkey_violations != 0xFFFF) ++ibp->rvp.mkey_violations; if (!ibp->rvp.mkey_lease_timeout && ibp->rvp.mkey_lease_period) ibp->rvp.mkey_lease_timeout = jiffies + ibp->rvp.mkey_lease_period * HZ; /* Generate a trap notice. */ bad_mkey(ibp, mad, mkey, dr_slid, return_path, hop_cnt); ret = 1; } } return ret; } /* * The SMA caches reads from LCB registers in case the LCB is unavailable. * (The LCB is unavailable in certain link states, for example.) */ struct lcb_datum { u32 off; u64 val; }; static struct lcb_datum lcb_cache[] = { { DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 }, }; static int write_lcb_cache(u32 off, u64 val) { int i; for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { if (lcb_cache[i].off == off) { lcb_cache[i].val = val; return 0; } } pr_warn("%s bad offset 0x%x\n", __func__, off); return -1; } static int read_lcb_cache(u32 off, u64 *val) { int i; for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { if (lcb_cache[i].off == off) { *val = lcb_cache[i].val; return 0; } } pr_warn("%s bad offset 0x%x\n", __func__, off); return -1; } void read_ltp_rtt(struct hfi1_devdata *dd) { u64 reg; if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg)) dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__); else write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg); } static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { int i; struct hfi1_devdata *dd; struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; struct opa_port_info *pi = (struct opa_port_info *)data; u8 mtu; u8 credit_rate; u8 is_beaconing_active; u32 state; u32 num_ports = OPA_AM_NPORT(am); u32 start_of_sm_config = OPA_AM_START_SM_CFG(am); u32 buffer_units; u64 tmp = 0; if (num_ports != 1 || smp_length_check(sizeof(*pi), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } dd = dd_from_ibdev(ibdev); /* IB numbers ports from 1, hw from 0 */ ppd = dd->pport + (port - 1); ibp = &ppd->ibport_data; if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || ppd->vls_supported > ARRAY_SIZE(dd->vld)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } pi->lid = cpu_to_be32(ppd->lid); /* Only return the mkey if the protection field allows it. 
*/ if (!(smp->method == IB_MGMT_METHOD_GET && ibp->rvp.mkey != smp->mkey && ibp->rvp.mkeyprot == 1)) pi->mkey = ibp->rvp.mkey; pi->subnet_prefix = ibp->rvp.gid_prefix; pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid); pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags); pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period); pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp); pi->sa_qp = cpu_to_be32(ppd->sa_qp); pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled); pi->link_width.supported = cpu_to_be16(ppd->link_width_supported); pi->link_width.active = cpu_to_be16(ppd->link_width_active); pi->link_width_downgrade.supported = cpu_to_be16(ppd->link_width_downgrade_supported); pi->link_width_downgrade.enabled = cpu_to_be16(ppd->link_width_downgrade_enabled); pi->link_width_downgrade.tx_active = cpu_to_be16(ppd->link_width_downgrade_tx_active); pi->link_width_downgrade.rx_active = cpu_to_be16(ppd->link_width_downgrade_rx_active); pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported); pi->link_speed.active = cpu_to_be16(ppd->link_speed_active); pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled); state = driver_lstate(ppd); if (start_of_sm_config && (state == IB_PORT_INIT)) ppd->is_sm_config_started = 1; pi->port_phys_conf = (ppd->port_type & 0xf); pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4; pi->port_states.ledenable_offlinereason |= ppd->is_sm_config_started << 5; /* * This pairs with the memory barrier in hfi1_start_led_override to * ensure that we read the correct state of LED beaconing represented * by led_override_timer_active */ smp_rmb(); is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active); pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6; pi->port_states.ledenable_offlinereason |= ppd->offline_disabled_reason; pi->port_states.portphysstate_portstate = (driver_pstate(ppd) << 4) | state; pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc; memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu)); for (i = 0; i < ppd->vls_supported; i++) { mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU); if ((i % 2) == 0) pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4); else pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu; } /* don't forget VL 15 */ mtu = mtu_to_enum(dd->vld[15].mtu, 2048); pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu; pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL; pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS); pi->partenforce_filterraw |= (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON); if (ppd->part_enforce & HFI1_PART_ENFORCE_IN) pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN; if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT) pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT; pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations); /* P_KeyViolations are counted by hardware. */ pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations); pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations); pi->vl.cap = ppd->vls_supported; pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit); pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP); pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP); pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout; pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 | OPA_PORT_LINK_MODE_OPA << 5 | OPA_PORT_LINK_MODE_OPA); pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode); pi->port_mode = cpu_to_be16( ppd->is_active_optimize_enabled ? 
OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0); pi->port_packet_format.supported = cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B | OPA_PORT_PACKET_FORMAT_16B); pi->port_packet_format.enabled = cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B | OPA_PORT_PACKET_FORMAT_16B); /* flit_control.interleave is (OPA V1, version .76): * bits use * ---- --- * 2 res * 2 DistanceSupported * 2 DistanceEnabled * 5 MaxNextLevelTxEnabled * 5 MaxNestLevelRxSupported * * HFI supports only "distance mode 1" (see OPA V1, version .76, * section 9.6.2), so set DistanceSupported, DistanceEnabled * to 0x1. */ pi->flit_control.interleave = cpu_to_be16(0x1400); pi->link_down_reason = ppd->local_link_down_reason.sma; pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma; pi->port_error_action = cpu_to_be32(ppd->port_error_action); pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096); /* 32.768 usec. response time (guessing) */ pi->resptimevalue = 3; pi->local_port_num = port; /* buffer info for FM */ pi->overall_buffer_space = cpu_to_be16(dd->link_credits); pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid); pi->neigh_port_num = ppd->neighbor_port_number; pi->port_neigh_mode = (ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) | (ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) | (ppd->neighbor_fm_security ? OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0); /* HFIs shall always return VL15 credits to their * neighbor in a timely manner, without any credit return pacing. */ credit_rate = 0; buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC; buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK; buffer_units |= (credit_rate << 6) & OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE; buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT; pi->buffer_units = cpu_to_be32(buffer_units); pi->opa_cap_mask = cpu_to_be16(ibp->rvp.port_cap3_flags); pi->collectivemask_multicastmask = ((OPA_COLLECTIVE_NR & 0x7) << 3 | (OPA_MCAST_NR & 0x7)); /* HFI supports a replay buffer 128 LTPs in size */ pi->replay_depth.buffer = 0x80; /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp); /* * this counter is 16 bits wide, but the replay_depth.wire * variable is only 8 bits */ if (tmp > 0xff) tmp = 0xff; pi->replay_depth.wire = tmp; if (resp_len) *resp_len += sizeof(struct opa_port_info); return reply((struct ib_mad_hdr *)smp); } /** * get_pkeys - return the PKEY table * @dd: the hfi1_ib device * @port: the IB port number * @pkeys: the pkey table is placed here */ static int get_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys) { struct hfi1_pportdata *ppd = dd->pport + port - 1; memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys)); return 0; } static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 n_blocks_req = OPA_AM_NBLK(am); u32 start_block = am & 0x7ff; __be16 *p; u16 *q; int i; u16 n_blocks_avail; unsigned npkeys = hfi1_get_npkeys(dd); size_t size; if (n_blocks_req == 0) { pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n", port, start_block, n_blocks_req); smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16); if (smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } if (start_block + n_blocks_req > n_blocks_avail || 
n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) { pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; " "avail 0x%x; blk/smp 0x%lx\n", start_block, n_blocks_req, n_blocks_avail, OPA_NUM_PKEY_BLOCKS_PER_SMP); smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } p = (__be16 *)data; q = (u16 *)data; /* get the real pkeys if we are requesting the first block */ if (start_block == 0) { get_pkeys(dd, port, q); for (i = 0; i < npkeys; i++) p[i] = cpu_to_be16(q[i]); if (resp_len) *resp_len += size; } else { smp->status |= IB_SMP_INVALID_FIELD; } return reply((struct ib_mad_hdr *)smp); } enum { HFI_TRANSITION_DISALLOWED, HFI_TRANSITION_IGNORED, HFI_TRANSITION_ALLOWED, HFI_TRANSITION_UNDEFINED, }; /* * Use shortened names to improve readability of * {logical,physical}_state_transitions */ enum { __D = HFI_TRANSITION_DISALLOWED, __I = HFI_TRANSITION_IGNORED, __A = HFI_TRANSITION_ALLOWED, __U = HFI_TRANSITION_UNDEFINED, }; /* * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are * represented in physical_state_transitions. */ #define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1) /* * Within physical_state_transitions, rows represent "old" states, * columns "new" states, and physical_state_transitions.allowed[old][new] * indicates if the transition from old state to new state is legal (see * OPAg1v1, Table 6-4). */ static const struct { u8 allowed[__N_PHYSTATES][__N_PHYSTATES]; } physical_state_transitions = { { /* 2 3 4 5 6 7 8 9 10 11 */ /* 2 */ { __A, __A, __D, __D, __D, __D, __D, __D, __D, __D }, /* 3 */ { __A, __I, __D, __D, __D, __D, __D, __D, __D, __A }, /* 4 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U }, /* 5 */ { __A, __A, __D, __I, __D, __D, __D, __D, __D, __D }, /* 6 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U }, /* 7 */ { __D, __A, __D, __D, __D, __I, __D, __D, __D, __D }, /* 8 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U }, /* 9 */ { __I, __A, __D, __D, __D, __D, __D, __I, __D, __D }, /*10 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U }, /*11 */ { __D, __A, __D, __D, __D, __D, __D, __D, __D, __I }, } }; /* * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented * logical_state_transitions */ #define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1) /* * Within logical_state_transitions rows represent "old" states, * columns "new" states, and logical_state_transitions.allowed[old][new] * indicates if the transition from old state to new state is legal (see * OPAg1v1, Table 9-12). 
*/ static const struct { u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES]; } logical_state_transitions = { { /* 1 2 3 4 5 */ /* 1 */ { __I, __D, __D, __D, __U}, /* 2 */ { __D, __I, __A, __D, __U}, /* 3 */ { __D, __D, __I, __A, __U}, /* 4 */ { __D, __D, __I, __I, __U}, /* 5 */ { __U, __U, __U, __U, __U}, } }; static int logical_transition_allowed(int old, int new) { if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER || new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) { pr_warn("invalid logical state(s) (old %d new %d)\n", old, new); return HFI_TRANSITION_UNDEFINED; } if (new == IB_PORT_NOP) return HFI_TRANSITION_ALLOWED; /* always allowed */ /* adjust states for indexing into logical_state_transitions */ old -= IB_PORT_DOWN; new -= IB_PORT_DOWN; if (old < 0 || new < 0) return HFI_TRANSITION_UNDEFINED; return logical_state_transitions.allowed[old][new]; } static int physical_transition_allowed(int old, int new) { if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX || new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) { pr_warn("invalid physical state(s) (old %d new %d)\n", old, new); return HFI_TRANSITION_UNDEFINED; } if (new == IB_PORTPHYSSTATE_NOP) return HFI_TRANSITION_ALLOWED; /* always allowed */ /* adjust states for indexing into physical_state_transitions */ old -= IB_PORTPHYSSTATE_POLLING; new -= IB_PORTPHYSSTATE_POLLING; if (old < 0 || new < 0) return HFI_TRANSITION_UNDEFINED; return physical_state_transitions.allowed[old][new]; } static int port_states_transition_allowed(struct hfi1_pportdata *ppd, u32 logical_new, u32 physical_new) { u32 physical_old = driver_pstate(ppd); u32 logical_old = driver_lstate(ppd); int ret, logical_allowed, physical_allowed; ret = logical_transition_allowed(logical_old, logical_new); logical_allowed = ret; if (ret == HFI_TRANSITION_DISALLOWED || ret == HFI_TRANSITION_UNDEFINED) { pr_warn("invalid logical state transition %s -> %s\n", opa_lstate_name(logical_old), opa_lstate_name(logical_new)); return ret; } ret = physical_transition_allowed(physical_old, physical_new); physical_allowed = ret; if (ret == HFI_TRANSITION_DISALLOWED || ret == HFI_TRANSITION_UNDEFINED) { pr_warn("invalid physical state transition %s -> %s\n", opa_pstate_name(physical_old), opa_pstate_name(physical_new)); return ret; } if (logical_allowed == HFI_TRANSITION_IGNORED && physical_allowed == HFI_TRANSITION_IGNORED) return HFI_TRANSITION_IGNORED; /* * A change request of Physical Port State from * 'Offline' to 'Polling' should be ignored. */ if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) && (physical_new == IB_PORTPHYSSTATE_POLLING)) return HFI_TRANSITION_IGNORED; /* * Either physical_allowed or logical_allowed is * HFI_TRANSITION_ALLOWED. 
*/ return HFI_TRANSITION_ALLOWED; } static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp, u32 logical_state, u32 phys_state, int local_mad) { struct hfi1_devdata *dd = ppd->dd; u32 link_state; int ret; ret = port_states_transition_allowed(ppd, logical_state, phys_state); if (ret == HFI_TRANSITION_DISALLOWED || ret == HFI_TRANSITION_UNDEFINED) { /* error message emitted above */ smp->status |= IB_SMP_INVALID_FIELD; return 0; } if (ret == HFI_TRANSITION_IGNORED) return 0; if ((phys_state != IB_PORTPHYSSTATE_NOP) && !(logical_state == IB_PORT_DOWN || logical_state == IB_PORT_NOP)){ pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n", logical_state, phys_state); smp->status |= IB_SMP_INVALID_FIELD; } /* * Logical state changes are summarized in OPAv1g1 spec., * Table 9-12; physical state changes are summarized in * OPAv1g1 spec., Table 6.4. */ switch (logical_state) { case IB_PORT_NOP: if (phys_state == IB_PORTPHYSSTATE_NOP) break; fallthrough; case IB_PORT_DOWN: if (phys_state == IB_PORTPHYSSTATE_NOP) { link_state = HLS_DN_DOWNDEF; } else if (phys_state == IB_PORTPHYSSTATE_POLLING) { link_state = HLS_DN_POLL; set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE, 0, OPA_LINKDOWN_REASON_FM_BOUNCE); } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) { link_state = HLS_DN_DISABLE; } else { pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n", phys_state); smp->status |= IB_SMP_INVALID_FIELD; break; } if ((link_state == HLS_DN_POLL || link_state == HLS_DN_DOWNDEF)) { /* * Going to poll. No matter what the current state, * always move offline first, then tune and start the * link. This correctly handles a FM link bounce and * a link enable. Going offline is a no-op if already * offline. */ set_link_state(ppd, HLS_DN_OFFLINE); start_link(ppd); } else { set_link_state(ppd, link_state); } if (link_state == HLS_DN_DISABLE && (ppd->offline_disabled_reason > HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) || ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED); /* * Don't send a reply if the response would be sent * through the disabled port. 
*/ if (link_state == HLS_DN_DISABLE && !local_mad) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; break; case IB_PORT_ARMED: ret = set_link_state(ppd, HLS_UP_ARMED); if (!ret) send_idle_sma(dd, SMA_IDLE_ARM); break; case IB_PORT_ACTIVE: if (ppd->neighbor_normal) { ret = set_link_state(ppd, HLS_UP_ACTIVE); if (ret == 0) send_idle_sma(dd, SMA_IDLE_ACTIVE); } else { pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n"); smp->status |= IB_SMP_INVALID_FIELD; } break; default: pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n", logical_state); smp->status |= IB_SMP_INVALID_FIELD; } return 0; } /* * subn_set_opa_portinfo - set port information * @smp: the incoming SM packet * @ibdev: the infiniband device * @port: the port on the device * */ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len, int local_mad) { struct opa_port_info *pi = (struct opa_port_info *)data; struct ib_event event; struct hfi1_devdata *dd; struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; u8 clientrereg; unsigned long flags; u32 smlid; u32 lid; u8 ls_old, ls_new, ps_new; u8 vls; u8 msl; u8 crc_enabled; u16 lse, lwe, mtu; u32 num_ports = OPA_AM_NPORT(am); u32 start_of_sm_config = OPA_AM_START_SM_CFG(am); int ret, i, invalid = 0, call_set_mtu = 0; int call_link_downgrade_policy = 0; if (num_ports != 1 || smp_length_check(sizeof(*pi), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } lid = be32_to_cpu(pi->lid); if (lid & 0xFF000000) { pr_warn("OPA_PortInfo lid out of range: %X\n", lid); smp->status |= IB_SMP_INVALID_FIELD; goto get_only; } smlid = be32_to_cpu(pi->sm_lid); if (smlid & 0xFF000000) { pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid); smp->status |= IB_SMP_INVALID_FIELD; goto get_only; } clientrereg = (pi->clientrereg_subnettimeout & OPA_PI_MASK_CLIENT_REREGISTER); dd = dd_from_ibdev(ibdev); /* IB numbers ports from 1, hw from 0 */ ppd = dd->pport + (port - 1); ibp = &ppd->ibport_data; event.device = ibdev; event.element.port_num = port; ls_old = driver_lstate(ppd); ibp->rvp.mkey = pi->mkey; if (ibp->rvp.gid_prefix != pi->subnet_prefix) { ibp->rvp.gid_prefix = pi->subnet_prefix; event.event = IB_EVENT_GID_CHANGE; ib_dispatch_event(&event); } ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period); /* Must be a valid unicast LID address. */ if ((lid == 0 && ls_old > IB_PORT_INIT) || (hfi1_is_16B_mcast(lid))) { smp->status |= IB_SMP_INVALID_FIELD; pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n", lid); } else if (ppd->lid != lid || ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) { if (ppd->lid != lid) hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT); if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT); hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC); event.event = IB_EVENT_LID_CHANGE; ib_dispatch_event(&event); if (HFI1_PORT_GUID_INDEX + 1 < HFI1_GUIDS_PER_PORT) { /* Manufacture GID from LID to support extended * addresses */ ppd->guids[HFI1_PORT_GUID_INDEX + 1] = be64_to_cpu(OPA_MAKE_ID(lid)); event.event = IB_EVENT_GID_CHANGE; ib_dispatch_event(&event); } } msl = pi->smsl & OPA_PI_MASK_SMSL; if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON) ppd->linkinit_reason = (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON); /* Must be a valid unicast LID address. 
*/ if ((smlid == 0 && ls_old > IB_PORT_INIT) || (hfi1_is_16B_mcast(smlid))) { smp->status |= IB_SMP_INVALID_FIELD; pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid); } else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) { pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid); spin_lock_irqsave(&ibp->rvp.lock, flags); if (ibp->rvp.sm_ah) { if (smlid != ibp->rvp.sm_lid) hfi1_modify_qp0_ah(ibp, ibp->rvp.sm_ah, smlid); if (msl != ibp->rvp.sm_sl) rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl); } spin_unlock_irqrestore(&ibp->rvp.lock, flags); if (smlid != ibp->rvp.sm_lid) ibp->rvp.sm_lid = smlid; if (msl != ibp->rvp.sm_sl) ibp->rvp.sm_sl = msl; event.event = IB_EVENT_SM_CHANGE; ib_dispatch_event(&event); } if (pi->link_down_reason == 0) { ppd->local_link_down_reason.sma = 0; ppd->local_link_down_reason.latest = 0; } if (pi->neigh_link_down_reason == 0) { ppd->neigh_link_down_reason.sma = 0; ppd->neigh_link_down_reason.latest = 0; } ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp); ppd->sa_qp = be32_to_cpu(pi->sa_qp); ppd->port_error_action = be32_to_cpu(pi->port_error_action); lwe = be16_to_cpu(pi->link_width.enabled); if (lwe) { if (lwe == OPA_LINK_WIDTH_RESET || lwe == OPA_LINK_WIDTH_RESET_OLD) set_link_width_enabled(ppd, ppd->link_width_supported); else if ((lwe & ~ppd->link_width_supported) == 0) set_link_width_enabled(ppd, lwe); else smp->status |= IB_SMP_INVALID_FIELD; } lwe = be16_to_cpu(pi->link_width_downgrade.enabled); /* LWD.E is always applied - 0 means "disabled" */ if (lwe == OPA_LINK_WIDTH_RESET || lwe == OPA_LINK_WIDTH_RESET_OLD) { set_link_width_downgrade_enabled(ppd, ppd-> link_width_downgrade_supported ); } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) { /* only set and apply if something changed */ if (lwe != ppd->link_width_downgrade_enabled) { set_link_width_downgrade_enabled(ppd, lwe); call_link_downgrade_policy = 1; } } else { smp->status |= IB_SMP_INVALID_FIELD; } lse = be16_to_cpu(pi->link_speed.enabled); if (lse) { if (lse & be16_to_cpu(pi->link_speed.supported)) set_link_speed_enabled(ppd, lse); else smp->status |= IB_SMP_INVALID_FIELD; } ibp->rvp.mkeyprot = (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6; ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF; (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT, ibp->rvp.vl_high_limit); if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || ppd->vls_supported > ARRAY_SIZE(dd->vld)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < ppd->vls_supported; i++) { if ((i % 2) == 0) mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >> 4) & 0xF); else mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] & 0xF); if (mtu == 0xffff) { pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n", mtu, (pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF); smp->status |= IB_SMP_INVALID_FIELD; mtu = hfi1_max_mtu; /* use a valid MTU */ } if (dd->vld[i].mtu != mtu) { dd_dev_info(dd, "MTU change on vl %d from %d to %d\n", i, dd->vld[i].mtu, mtu); dd->vld[i].mtu = mtu; call_set_mtu++; } } /* As per OPAV1 spec: VL15 must support and be configured * for operation with a 2048 or larger MTU. 
*/ mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF); if (mtu < 2048 || mtu == 0xffff) mtu = 2048; if (dd->vld[15].mtu != mtu) { dd_dev_info(dd, "MTU change on vl 15 from %d to %d\n", dd->vld[15].mtu, mtu); dd->vld[15].mtu = mtu; call_set_mtu++; } if (call_set_mtu) set_mtu(ppd); /* Set operational VLs */ vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL; if (vls) { if (vls > ppd->vls_supported) { pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n", pi->operational_vls); smp->status |= IB_SMP_INVALID_FIELD; } else { if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS, vls) == -EINVAL) smp->status |= IB_SMP_INVALID_FIELD; } } if (pi->mkey_violations == 0) ibp->rvp.mkey_violations = 0; if (pi->pkey_violations == 0) ibp->rvp.pkey_violations = 0; if (pi->qkey_violations == 0) ibp->rvp.qkey_violations = 0; ibp->rvp.subnet_timeout = pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT; crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode); crc_enabled >>= 4; crc_enabled &= 0xf; if (crc_enabled != 0) ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled); ppd->is_active_optimize_enabled = !!(be16_to_cpu(pi->port_mode) & OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE); ls_new = pi->port_states.portphysstate_portstate & OPA_PI_MASK_PORT_STATE; ps_new = (pi->port_states.portphysstate_portstate & OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4; if (ls_old == IB_PORT_INIT) { if (start_of_sm_config) { if (ls_new == ls_old || (ls_new == IB_PORT_ARMED)) ppd->is_sm_config_started = 1; } else if (ls_new == IB_PORT_ARMED) { if (ppd->is_sm_config_started == 0) { invalid = 1; smp->status |= IB_SMP_INVALID_FIELD; } } } /* Handle CLIENT_REREGISTER event b/c SM asked us for it */ if (clientrereg) { event.event = IB_EVENT_CLIENT_REREGISTER; ib_dispatch_event(&event); } /* * Do the port state change now that the other link parameters * have been set. * Changing the port physical state only makes sense if the link * is down or is being set to down. */ if (!invalid) { ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad); if (ret) return ret; } ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len, max_len); /* restore re-reg bit per o14-12.2.1 */ pi->clientrereg_subnettimeout |= clientrereg; /* * Apply the new link downgrade policy. This may result in a link * bounce. Do this after everything else so things are settled. * Possible problem: if setting the port state above fails, then * the policy change is not applied. */ if (call_link_downgrade_policy) apply_link_downgrade_policy(ppd, 0); return ret; get_only: return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len, max_len); } /** * set_pkeys - set the PKEY table for ctxt 0 * @dd: the hfi1_ib device * @port: the IB port number * @pkeys: the PKEY table */ static int set_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys) { struct hfi1_pportdata *ppd; int i; int changed = 0; int update_includes_mgmt_partition = 0; /* * IB port one/two always maps to context zero/one, * always a kernel context, no locking needed * If we get here with ppd setup, no need to check * that rcd is valid. */ ppd = dd->pport + (port - 1); /* * If the update does not include the management pkey, don't do it. */ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { if (pkeys[i] == LIM_MGMT_P_KEY) { update_includes_mgmt_partition = 1; break; } } if (!update_includes_mgmt_partition) return 1; for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { u16 key = pkeys[i]; u16 okey = ppd->pkeys[i]; if (key == okey) continue; /* * The SM gives us the complete PKey table. 
We have * to ensure that we put the PKeys in the matching * slots. */ ppd->pkeys[i] = key; changed = 1; } if (changed) { (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); hfi1_event_pkey_change(dd, port); } return 0; } static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 n_blocks_sent = OPA_AM_NBLK(am); u32 start_block = am & 0x7ff; u16 *p = (u16 *)data; __be16 *q = (__be16 *)data; int i; u16 n_blocks_avail; unsigned npkeys = hfi1_get_npkeys(dd); u32 size = 0; if (n_blocks_sent == 0) { pr_warn("OPA Get PKey AM Invalid : P = %u; B = 0x%x; N = 0x%x\n", port, start_block, n_blocks_sent); smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; size = sizeof(u16) * (n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE); if (smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } if (start_block + n_blocks_sent > n_blocks_avail || n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) { pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n", start_block, n_blocks_sent, n_blocks_avail, OPA_NUM_PKEY_BLOCKS_PER_SMP); smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++) p[i] = be16_to_cpu(q[i]); if (start_block == 0 && set_pkeys(dd, port, p) != 0) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len, max_len); } #define ILLEGAL_VL 12 /* * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except * for SC15, which must map to VL15). If we don't remap things this * way it is possible for VL15 counters to increment when we try to * send on a SC which is mapped to an invalid VL. * When getting the table convert ILLEGAL_VL back to VL15. 
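 *
 * A small example of the remapping (a sketch, not from the spec): with
 * set == true an SC3 entry of 0xf (VL15) is rewritten to ILLEGAL_VL (12);
 * reading the table back with set == false turns 12 into 0xf again, so
 * the FM sees exactly the value it originally wrote.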
*/ static void filter_sc2vlt(void *data, bool set) { int i; u8 *pd = data; for (i = 0; i < OPA_MAX_SCS; i++) { if (i == 15) continue; if (set) { if ((pd[i] & 0x1f) == 0xf) pd[i] = ILLEGAL_VL; } else { if ((pd[i] & 0x1f) == ILLEGAL_VL) pd[i] = 0xf; } } } static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data) { u64 *val = data; filter_sc2vlt(data, true); write_csr(dd, SEND_SC2VLT0, *val++); write_csr(dd, SEND_SC2VLT1, *val++); write_csr(dd, SEND_SC2VLT2, *val++); write_csr(dd, SEND_SC2VLT3, *val++); write_seqlock_irq(&dd->sc2vl_lock); memcpy(dd->sc2vl, data, sizeof(dd->sc2vl)); write_sequnlock_irq(&dd->sc2vl_lock); return 0; } static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data) { u64 *val = (u64 *)data; *val++ = read_csr(dd, SEND_SC2VLT0); *val++ = read_csr(dd, SEND_SC2VLT1); *val++ = read_csr(dd, SEND_SC2VLT2); *val++ = read_csr(dd, SEND_SC2VLT3); filter_sc2vlt((u64 *)data, false); return 0; } static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); u8 *p = data; size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */ unsigned i; if (am || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) *p++ = ibp->sl_to_sc[i]; if (resp_len) *resp_len += size; return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); u8 *p = data; size_t size = ARRAY_SIZE(ibp->sl_to_sc); int i; u8 sc; if (am || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) { sc = *p++; if (ibp->sl_to_sc[i] != sc) { ibp->sl_to_sc[i] = sc; /* Put all stale qps into error state */ hfi1_error_port_qps(ibp, i); } } return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len, max_len); } static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); u8 *p = data; size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */ unsigned i; if (am || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++) *p++ = ibp->sc_to_sl[i]; if (resp_len) *resp_len += size; return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); size_t size = ARRAY_SIZE(ibp->sc_to_sl); u8 *p = data; int i; if (am || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++) ibp->sc_to_sl[i] = *p++; return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len, max_len); } static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { u32 n_blocks = OPA_AM_NBLK(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); void *vp = (void *)data; size_t size = 4 * sizeof(u64); if (n_blocks != 1 || smp_length_check(size, max_len)) { smp->status |= 
IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } get_sc2vlt_tables(dd, vp); if (resp_len) *resp_len += size; return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { u32 n_blocks = OPA_AM_NBLK(am); int async_update = OPA_AM_ASYNC(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); void *vp = (void *)data; struct hfi1_pportdata *ppd; int lstate; /* * set_sc2vlt_tables writes the information contained in *data * to four 64-bit registers SendSC2VLt[0-3]. We need to make * sure *max_len is not greater than the total size of the four * SendSC2VLt[0-3] registers. */ size_t size = 4 * sizeof(u64); if (n_blocks != 1 || async_update || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } /* IB numbers ports from 1, hw from 0 */ ppd = dd->pport + (port - 1); lstate = driver_lstate(ppd); /* * it's known that async_update is 0 by this point, but include * the explicit check for clarity */ if (!async_update && (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } set_sc2vlt_tables(dd, vp); return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len, max_len); } static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { u32 n_blocks = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; void *vp = (void *)data; int size = sizeof(struct sc2vlnt); if (n_blocks != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ppd = dd->pport + (port - 1); fm_get_table(ppd, FM_TBL_SC2VLNT, vp); if (resp_len) *resp_len += size; return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { u32 n_blocks = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; void *vp = (void *)data; int lstate; int size = sizeof(struct sc2vlnt); if (n_blocks != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } /* IB numbers ports from 1, hw from 0 */ ppd = dd->pport + (port - 1); lstate = driver_lstate(ppd); if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ppd = dd->pport + (port - 1); fm_set_table(ppd, FM_TBL_SC2VLNT, vp); return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port, resp_len, max_len); } static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { u32 nports = OPA_AM_NPORT(am); u32 start_of_sm_config = OPA_AM_START_SM_CFG(am); u32 lstate; struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; struct opa_port_state_info *psi = (struct opa_port_state_info *)data; if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ibp = to_iport(ibdev, port); ppd = ppd_from_ibp(ibp); lstate = driver_lstate(ppd); if (start_of_sm_config && (lstate == IB_PORT_INIT)) ppd->is_sm_config_started = 1; psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4; 
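	/*
	 * ledenable_offlinereason is packed here as: bit 4 = neighbor_normal,
	 * bit 5 = is_sm_config_started, low bits = offline_disabled_reason.
	 */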
psi->port_states.ledenable_offlinereason |= ppd->is_sm_config_started << 5; psi->port_states.ledenable_offlinereason |= ppd->offline_disabled_reason; psi->port_states.portphysstate_portstate = (driver_pstate(ppd) << 4) | (lstate & 0xf); psi->link_width_downgrade_tx_active = cpu_to_be16(ppd->link_width_downgrade_tx_active); psi->link_width_downgrade_rx_active = cpu_to_be16(ppd->link_width_downgrade_rx_active); if (resp_len) *resp_len += sizeof(struct opa_port_state_info); return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len, int local_mad) { u32 nports = OPA_AM_NPORT(am); u32 start_of_sm_config = OPA_AM_START_SM_CFG(am); u32 ls_old; u8 ls_new, ps_new; struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; struct opa_port_state_info *psi = (struct opa_port_state_info *)data; int ret, invalid = 0; if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ibp = to_iport(ibdev, port); ppd = ppd_from_ibp(ibp); ls_old = driver_lstate(ppd); ls_new = port_states_to_logical_state(&psi->port_states); ps_new = port_states_to_phys_state(&psi->port_states); if (ls_old == IB_PORT_INIT) { if (start_of_sm_config) { if (ls_new == ls_old || (ls_new == IB_PORT_ARMED)) ppd->is_sm_config_started = 1; } else if (ls_new == IB_PORT_ARMED) { if (ppd->is_sm_config_started == 0) { invalid = 1; smp->status |= IB_SMP_INVALID_FIELD; } } } if (!invalid) { ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad); if (ret) return ret; } return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len, max_len); } static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 addr = OPA_AM_CI_ADDR(am); u32 len = OPA_AM_CI_LEN(am) + 1; int ret; if (dd->pport->port_type != PORT_TYPE_QSFP || smp_length_check(len, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } #define __CI_PAGE_SIZE BIT(7) /* 128 bytes */ #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1) #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK) /* * check that addr is within spec, and * addr and (addr + len - 1) are on the same "page" */ if (addr >= 4096 || (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ret = get_cable_info(dd, port, addr, len, data); if (ret == -ENODEV) { smp->status |= IB_SMP_UNSUP_METH_ATTR; return reply((struct ib_mad_hdr *)smp); } /* The address range for the CableInfo SMA query is wider than the * memory available on the QSFP cable. 
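 * (hence a -ERANGE return from get_cable_info() is not treated as a
 * failure in the check below; the zero-filled buffer is returned as-is.)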
We want to return a valid * response, albeit zeroed out, for address ranges beyond available * memory but that are within the CableInfo query spec */ if (ret < 0 && ret != -ERANGE) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } if (resp_len) *resp_len += len; return reply((struct ib_mad_hdr *)smp); } static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { u32 num_ports = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; struct buffer_control *p = (struct buffer_control *)data; int size = sizeof(struct buffer_control); if (num_ports != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ppd = dd->pport + (port - 1); fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p); trace_bct_get(dd, p); if (resp_len) *resp_len += size; return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { u32 num_ports = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; struct buffer_control *p = (struct buffer_control *)data; if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ppd = dd->pport + (port - 1); trace_bct_set(dd, p); if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len, max_len); } static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); u32 num_ports = OPA_AM_NPORT(am); u8 section = (am & 0x00ff0000) >> 16; u8 *p = data; int size = 256; if (num_ports != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } switch (section) { case OPA_VLARB_LOW_ELEMENTS: fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p); break; case OPA_VLARB_HIGH_ELEMENTS: fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p); break; case OPA_VLARB_PREEMPT_ELEMENTS: fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p); break; case OPA_VLARB_PREEMPT_MATRIX: fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p); break; default: pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n", be32_to_cpu(smp->attr_mod)); smp->status |= IB_SMP_INVALID_FIELD; size = 0; break; } if (size > 0 && resp_len) *resp_len += size; return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); u32 num_ports = OPA_AM_NPORT(am); u8 section = (am & 0x00ff0000) >> 16; u8 *p = data; int size = 256; if (num_ports != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } switch (section) { case OPA_VLARB_LOW_ELEMENTS: (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p); break; case OPA_VLARB_HIGH_ELEMENTS: (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p); break; /* * neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX * can be changed from the default values */ case OPA_VLARB_PREEMPT_ELEMENTS: case OPA_VLARB_PREEMPT_MATRIX: smp->status |= 
IB_SMP_UNSUP_METH_ATTR; break; default: pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n", be32_to_cpu(smp->attr_mod)); smp->status |= IB_SMP_INVALID_FIELD; break; } return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len, max_len); } struct opa_pma_mad { struct ib_mad_hdr mad_hdr; u8 data[2024]; } __packed; struct opa_port_status_req { __u8 port_num; __u8 reserved[3]; __be32 vl_select_mask; }; #define VL_MASK_ALL 0x00000000000080ffUL struct opa_port_status_rsp { __u8 port_num; __u8 reserved[3]; __be32 vl_select_mask; /* Data counters */ __be64 port_xmit_data; __be64 port_rcv_data; __be64 port_xmit_pkts; __be64 port_rcv_pkts; __be64 port_multicast_xmit_pkts; __be64 port_multicast_rcv_pkts; __be64 port_xmit_wait; __be64 sw_port_congestion; __be64 port_rcv_fecn; __be64 port_rcv_becn; __be64 port_xmit_time_cong; __be64 port_xmit_wasted_bw; __be64 port_xmit_wait_data; __be64 port_rcv_bubble; __be64 port_mark_fecn; /* Error counters */ __be64 port_rcv_constraint_errors; __be64 port_rcv_switch_relay_errors; __be64 port_xmit_discards; __be64 port_xmit_constraint_errors; __be64 port_rcv_remote_physical_errors; __be64 local_link_integrity_errors; __be64 port_rcv_errors; __be64 excessive_buffer_overruns; __be64 fm_config_errors; __be32 link_error_recovery; __be32 link_downed; u8 uncorrectable_errors; u8 link_quality_indicator; /* 5res, 3bit */ u8 res2[6]; struct _vls_pctrs { /* per-VL Data counters */ __be64 port_vl_xmit_data; __be64 port_vl_rcv_data; __be64 port_vl_xmit_pkts; __be64 port_vl_rcv_pkts; __be64 port_vl_xmit_wait; __be64 sw_port_vl_congestion; __be64 port_vl_rcv_fecn; __be64 port_vl_rcv_becn; __be64 port_xmit_time_cong; __be64 port_vl_xmit_wasted_bw; __be64 port_vl_xmit_wait_data; __be64 port_vl_rcv_bubble; __be64 port_vl_mark_fecn; __be64 port_vl_xmit_discards; } vls[]; /* real array size defined by # bits set in vl_select_mask */ }; enum counter_selects { CS_PORT_XMIT_DATA = (1 << 31), CS_PORT_RCV_DATA = (1 << 30), CS_PORT_XMIT_PKTS = (1 << 29), CS_PORT_RCV_PKTS = (1 << 28), CS_PORT_MCAST_XMIT_PKTS = (1 << 27), CS_PORT_MCAST_RCV_PKTS = (1 << 26), CS_PORT_XMIT_WAIT = (1 << 25), CS_SW_PORT_CONGESTION = (1 << 24), CS_PORT_RCV_FECN = (1 << 23), CS_PORT_RCV_BECN = (1 << 22), CS_PORT_XMIT_TIME_CONG = (1 << 21), CS_PORT_XMIT_WASTED_BW = (1 << 20), CS_PORT_XMIT_WAIT_DATA = (1 << 19), CS_PORT_RCV_BUBBLE = (1 << 18), CS_PORT_MARK_FECN = (1 << 17), CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16), CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15), CS_PORT_XMIT_DISCARDS = (1 << 14), CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13), CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12), CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11), CS_PORT_RCV_ERRORS = (1 << 10), CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9), CS_FM_CONFIG_ERRORS = (1 << 8), CS_LINK_ERROR_RECOVERY = (1 << 7), CS_LINK_DOWNED = (1 << 6), CS_UNCORRECTABLE_ERRORS = (1 << 5), }; struct opa_clear_port_status { __be64 port_select_mask[4]; __be32 counter_select_mask; }; struct opa_aggregate { __be16 attr_id; __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */ __be32 attr_mod; u8 data[]; }; #define MSK_LLI 0x000000f0 #define MSK_LLI_SFT 4 #define MSK_LER 0x0000000f #define MSK_LER_SFT 0 #define ADD_LLI 8 #define ADD_LER 2 /* Request contains first three fields, response contains those plus the rest */ struct opa_port_data_counters_msg { __be64 port_select_mask[4]; __be32 vl_select_mask; __be32 resolution; /* Response fields follow */ struct _port_dctrs { u8 port_number; u8 reserved2[3]; __be32 link_quality_indicator; /* 29res, 3bit */ /* Data counters */ __be64 
port_xmit_data; __be64 port_rcv_data; __be64 port_xmit_pkts; __be64 port_rcv_pkts; __be64 port_multicast_xmit_pkts; __be64 port_multicast_rcv_pkts; __be64 port_xmit_wait; __be64 sw_port_congestion; __be64 port_rcv_fecn; __be64 port_rcv_becn; __be64 port_xmit_time_cong; __be64 port_xmit_wasted_bw; __be64 port_xmit_wait_data; __be64 port_rcv_bubble; __be64 port_mark_fecn; __be64 port_error_counter_summary; /* Sum of error counts/port */ struct _vls_dctrs { /* per-VL Data counters */ __be64 port_vl_xmit_data; __be64 port_vl_rcv_data; __be64 port_vl_xmit_pkts; __be64 port_vl_rcv_pkts; __be64 port_vl_xmit_wait; __be64 sw_port_vl_congestion; __be64 port_vl_rcv_fecn; __be64 port_vl_rcv_becn; __be64 port_xmit_time_cong; __be64 port_vl_xmit_wasted_bw; __be64 port_vl_xmit_wait_data; __be64 port_vl_rcv_bubble; __be64 port_vl_mark_fecn; } vls[]; /* array size defined by #bits set in vl_select_mask*/ } port; }; struct opa_port_error_counters64_msg { /* * Request contains first two fields, response contains the * whole magilla */ __be64 port_select_mask[4]; __be32 vl_select_mask; /* Response-only fields follow */ __be32 reserved1; struct _port_ectrs { u8 port_number; u8 reserved2[7]; __be64 port_rcv_constraint_errors; __be64 port_rcv_switch_relay_errors; __be64 port_xmit_discards; __be64 port_xmit_constraint_errors; __be64 port_rcv_remote_physical_errors; __be64 local_link_integrity_errors; __be64 port_rcv_errors; __be64 excessive_buffer_overruns; __be64 fm_config_errors; __be32 link_error_recovery; __be32 link_downed; u8 uncorrectable_errors; u8 reserved3[7]; struct _vls_ectrs { __be64 port_vl_xmit_discards; } vls[]; /* array size defined by #bits set in vl_select_mask */ } port; }; struct opa_port_error_info_msg { __be64 port_select_mask[4]; __be32 error_info_select_mask; __be32 reserved1; struct _port_ei { u8 port_number; u8 reserved2[7]; /* PortRcvErrorInfo */ struct { u8 status_and_code; union { u8 raw[17]; struct { /* EI1to12 format */ u8 packet_flit1[8]; u8 packet_flit2[8]; u8 remaining_flit_bits12; } ei1to12; struct { u8 packet_bytes[8]; u8 remaining_flit_bits; } ei13; } ei; u8 reserved3[6]; } __packed port_rcv_ei; /* ExcessiveBufferOverrunInfo */ struct { u8 status_and_sc; u8 reserved4[7]; } __packed excessive_buffer_overrun_ei; /* PortXmitConstraintErrorInfo */ struct { u8 status; u8 reserved5; __be16 pkey; __be32 slid; } __packed port_xmit_constraint_ei; /* PortRcvConstraintErrorInfo */ struct { u8 status; u8 reserved6; __be16 pkey; __be32 slid; } __packed port_rcv_constraint_ei; /* PortRcvSwitchRelayErrorInfo */ struct { u8 status_and_code; u8 reserved7[3]; __u32 error_info; } __packed port_rcv_switch_relay_ei; /* UncorrectableErrorInfo */ struct { u8 status_and_code; u8 reserved8; } __packed uncorrectable_ei; /* FMConfigErrorInfo */ struct { u8 status_and_code; u8 error_info; } __packed fm_config_ei; __u32 reserved9; } port; }; /* opa_port_error_info_msg error_info_select_mask bit definitions */ enum error_info_selects { ES_PORT_RCV_ERROR_INFO = (1 << 31), ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30), ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29), ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28), ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27), ES_UNCORRECTABLE_ERROR_INFO = (1 << 26), ES_FM_CONFIG_ERROR_INFO = (1 << 25) }; static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp, struct ib_device *ibdev, u32 *resp_len) { struct opa_class_port_info *p = (struct opa_class_port_info *)pmp->data; memset(pmp->data, 0, sizeof(pmp->data)); if (pmp->mad_hdr.attr_mod != 0) pmp->mad_hdr.status |= 
IB_SMP_INVALID_FIELD;

	p->base_version = OPA_MGMT_BASE_VERSION;
	p->class_version = OPA_SM_CLASS_VERSION;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	p->cap_mask2_resp_time = cpu_to_be32(18);

	if (resp_len)
		*resp_len += sizeof(*p);

	return reply((struct ib_mad_hdr *)pmp);
}

static void a0_portstatus(struct hfi1_pportdata *ppd,
			  struct opa_port_status_rsp *rsp)
{
	if (!is_bx(ppd->dd)) {
		unsigned long vl;
		u64 sum_vl_xmit_wait = 0;
		unsigned long vl_all_mask = VL_MASK_ALL;

		for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
			u64 tmp = sum_vl_xmit_wait +
				  read_port_cntr(ppd, C_TX_WAIT_VL,
						 idx_from_vl(vl));
			if (tmp < sum_vl_xmit_wait) {
				/* we wrapped */
				sum_vl_xmit_wait = (u64)~0;
				break;
			}
			sum_vl_xmit_wait = tmp;
		}
		if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
			rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
	}
}

/**
 * tx_link_width - convert link width bitmask to an integer
 * value representing the actual link width.
 * @link_width: width of active link
 * @return: index of the set bit in link_width that represents the
 * current link width
 *
 * The function converts the bitmask and returns the index of the set
 * bit that indicates the current link width.
 */
u16 tx_link_width(u16 link_width)
{
	int n = LINK_WIDTH_DEFAULT;
	u16 tx_width = n;

	while (link_width && n) {
		if (link_width & (1 << (n - 1))) {
			tx_width = n;
			break;
		}
		n--;
	}

	return tx_width;
}

/**
 * get_xmit_wait_counters - Convert the HFI's SendWaitCnt/SendWaitVlCnt
 * counter in units of TXE cycle times to flit times.
 * @ppd: info of the physical HFI port
 * @link_width: width of active link
 * @link_speed: speed of active link
 * @vl: VL0-VL7 or VL15 for a PortVLXmitWait counter request; if vl is
 * C_VL_COUNT, it represents a SendWaitCnt counter request
 * @return: the SendWaitCnt/SendWaitVlCnt counter value for the given vl
 *
 * Convert the SendWaitCnt/SendWaitVlCnt counter from TXE cycle times to
 * flit times. Call this function to sample these counters. The delta is
 * computed against the previous sample (ppd->prev_link_width and
 * ppd->port_vl_xmit_wait_last), and that state is then updated to the
 * current port_vl_xmit_wait_curr and link_width at the end of the function.
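 *
 * A sketch of the arithmetic performed below (field names as in ppd):
 *
 *   delta = convert_xmit_counter(curr - port_vl_xmit_wait_last[vl],
 *                                prev_link_width, link_speed);
 *   vl_xmit_flit_cnt[vl] += delta;
 *   port_vl_xmit_wait_last[vl] = curr;
 *   prev_link_width = link_width;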
*/ u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd, u16 link_width, u16 link_speed, int vl) { u64 port_vl_xmit_wait_curr; u64 delta_vl_xmit_wait; u64 xmit_wait_val; if (vl > C_VL_COUNT) return 0; if (vl < C_VL_COUNT) port_vl_xmit_wait_curr = read_port_cntr(ppd, C_TX_WAIT_VL, vl); else port_vl_xmit_wait_curr = read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL); xmit_wait_val = port_vl_xmit_wait_curr - ppd->port_vl_xmit_wait_last[vl]; delta_vl_xmit_wait = convert_xmit_counter(xmit_wait_val, ppd->prev_link_width, link_speed); ppd->vl_xmit_flit_cnt[vl] += delta_vl_xmit_wait; ppd->port_vl_xmit_wait_last[vl] = port_vl_xmit_wait_curr; ppd->prev_link_width = link_width; return ppd->vl_xmit_flit_cnt[vl]; } static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, struct ib_device *ibdev, u32 port, u32 *resp_len) { struct opa_port_status_req *req = (struct opa_port_status_req *)pmp->data; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct opa_port_status_rsp *rsp; unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask); unsigned long vl; size_t response_data_size; u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u32 port_num = req->port_num; u8 num_vls = hweight64(vl_select_mask); struct _vls_pctrs *vlinfo; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); int vfi; u64 tmp, tmp2; u16 link_width; u16 link_speed; response_data_size = struct_size(rsp, vls, num_vls); if (response_data_size > sizeof(pmp->data)) { pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE; return reply((struct ib_mad_hdr *)pmp); } if (nports != 1 || (port_num && port_num != port) || num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } memset(pmp->data, 0, sizeof(pmp->data)); rsp = (struct opa_port_status_rsp *)pmp->data; if (port_num) rsp->port_num = port_num; else rsp->port_num = port; rsp->port_rcv_constraint_errors = cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL)); hfi1_read_link_quality(dd, &rsp->link_quality_indicator); rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask); rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL)); rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL)); rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL)); rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL)); rsp->port_multicast_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL)); rsp->port_multicast_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL)); /* * Convert PortXmitWait counter from TXE cycle times * to flit times. 
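 * (The active width is derived from link_width_downgrade_tx_active and the
 * speed from link_speed_active just below, then both are passed to
 * get_xmit_wait_counters() with C_VL_COUNT for the port-wide counter.)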
*/ link_width = tx_link_width(ppd->link_width_downgrade_tx_active); link_speed = get_link_speed(ppd->link_speed_active); rsp->port_xmit_wait = cpu_to_be64(get_xmit_wait_counters(ppd, link_width, link_speed, C_VL_COUNT)); rsp->port_rcv_fecn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL)); rsp->port_rcv_becn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL)); rsp->port_xmit_discards = cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL)); rsp->port_xmit_constraint_errors = cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL)); rsp->port_rcv_remote_physical_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL)); rsp->local_link_integrity_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL)); tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL); tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL); if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) { /* overflow/wrapped */ rsp->link_error_recovery = cpu_to_be32(~0); } else { rsp->link_error_recovery = cpu_to_be32(tmp2); } rsp->port_rcv_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL)); rsp->excessive_buffer_overruns = cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL)); rsp->fm_config_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL)); rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL)); /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */ tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL); rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff; vlinfo = &rsp->vls[0]; vfi = 0; /* The vl_select_mask has been checked above, and we know * that it contains only entries which represent valid VLs. * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. */ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl)); rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp); rsp->vls[vfi].port_vl_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl))); rsp->vls[vfi].port_vl_xmit_data = cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl))); rsp->vls[vfi].port_vl_xmit_pkts = cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl))); /* * Convert PortVlXmitWait counter from TXE cycle * times to flit times. 
 */
		rsp->vls[vfi].port_vl_xmit_wait =
			cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
							   link_speed,
							   idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_rcv_fecn =
			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
						  idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_rcv_becn =
			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
						  idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_xmit_discards =
			cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
						   idx_from_vl(vl)));
		vlinfo++;
		vfi++;
	}

	a0_portstatus(ppd, rsp);

	if (resp_len)
		*resp_len += response_data_size;

	return reply((struct ib_mad_hdr *)pmp);
}

static u64 get_error_counter_summary(struct ib_device *ibdev, u32 port,
				     u8 res_lli, u8 res_ler)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 error_counter_summary = 0, tmp;

	error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
						CNTR_INVALID_VL);
	/* port_rcv_switch_relay_errors is 0 for HFIs */
	error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
						CNTR_INVALID_VL);
	error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
						CNTR_INVALID_VL);
	error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
					       CNTR_INVALID_VL);
	/* local link integrity must be right-shifted by the lli resolution */
	error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
						CNTR_INVALID_VL) >> res_lli);
	/* link error recovery must be right-shifted by the ler resolution */
	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
	tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
	error_counter_summary += (tmp >> res_ler);
	error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
					       CNTR_INVALID_VL);
	error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
	error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
					       CNTR_INVALID_VL);
	/* ppd->link_downed is a 32-bit value */
	error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
						CNTR_INVALID_VL);
	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
	/* this is an 8-bit quantity */
	error_counter_summary += tmp < 0x100 ?
(tmp & 0xff) : 0xff; return error_counter_summary; } static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp) { if (!is_bx(ppd->dd)) { unsigned long vl; u64 sum_vl_xmit_wait = 0; unsigned long vl_all_mask = VL_MASK_ALL; for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { u64 tmp = sum_vl_xmit_wait + read_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl)); if (tmp < sum_vl_xmit_wait) { /* we wrapped */ sum_vl_xmit_wait = (u64)~0; break; } sum_vl_xmit_wait = tmp; } if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait) rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait); } } static void pma_get_opa_port_dctrs(struct ib_device *ibdev, struct _port_dctrs *rsp) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL)); rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL)); rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL)); rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL)); rsp->port_multicast_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL)); rsp->port_multicast_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL)); } static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, struct ib_device *ibdev, u32 port, u32 *resp_len) { struct opa_port_data_counters_msg *req = (struct opa_port_data_counters_msg *)pmp->data; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct _port_dctrs *rsp; struct _vls_dctrs *vlinfo; size_t response_data_size; u32 num_ports; u8 lq, num_vls; u8 res_lli, res_ler; u64 port_mask; u32 port_num; unsigned long vl; unsigned long vl_select_mask; int vfi; u16 link_width; u16 link_speed; num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; num_vls = hweight32(be32_to_cpu(req->vl_select_mask)); vl_select_mask = be32_to_cpu(req->vl_select_mask); res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT; res_lli = res_lli ? res_lli + ADD_LLI : 0; res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT; res_ler = res_ler ? res_ler + ADD_LER : 0; if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } /* Sanity check */ response_data_size = struct_size(req, port.vls, num_vls); if (response_data_size > sizeof(pmp->data)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } /* * The bit set in the mask needs to be consistent with the * port the request came in on. */ port_mask = be64_to_cpu(req->port_select_mask[3]); port_num = find_first_bit((unsigned long *)&port_mask, sizeof(port_mask) * 8); if (port_num != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } rsp = &req->port; memset(rsp, 0, sizeof(*rsp)); rsp->port_number = port; /* * Note that link_quality_indicator is a 32 bit quantity in * 'datacounters' queries (as opposed to 'portinfo' queries, * where it's a byte). */ hfi1_read_link_quality(dd, &lq); rsp->link_quality_indicator = cpu_to_be32((u32)lq); pma_get_opa_port_dctrs(ibdev, rsp); /* * Convert PortXmitWait counter from TXE * cycle times to flit times. 
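 * (Same conversion helper as the PortStatus path: width/speed are sampled
 * below and fed to get_xmit_wait_counters() with C_VL_COUNT.)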
*/ link_width = tx_link_width(ppd->link_width_downgrade_tx_active); link_speed = get_link_speed(ppd->link_speed_active); rsp->port_xmit_wait = cpu_to_be64(get_xmit_wait_counters(ppd, link_width, link_speed, C_VL_COUNT)); rsp->port_rcv_fecn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL)); rsp->port_rcv_becn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL)); rsp->port_error_counter_summary = cpu_to_be64(get_error_counter_summary(ibdev, port, res_lli, res_ler)); vlinfo = &rsp->vls[0]; vfi = 0; /* The vl_select_mask has been checked above, and we know * that it contains only entries which represent valid VLs. * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. */ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_data = cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl))); rsp->vls[vfi].port_vl_xmit_pkts = cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl))); /* * Convert PortVlXmitWait counter from TXE * cycle times to flit times. */ rsp->vls[vfi].port_vl_xmit_wait = cpu_to_be64(get_xmit_wait_counters(ppd, link_width, link_speed, idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_fecn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_becn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl))); /* rsp->port_vl_xmit_time_cong is 0 for HFIs */ /* rsp->port_vl_xmit_wasted_bw ??? */ /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? * does this differ from rsp->vls[vfi].port_vl_xmit_wait */ /*rsp->vls[vfi].port_vl_mark_fecn = * cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT * + offset)); */ vlinfo++; vfi++; } a0_datacounters(ppd, rsp); if (resp_len) *resp_len += response_data_size; return reply((struct ib_mad_hdr *)pmp); } static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u32 port) { struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *) pmp->data; struct _port_dctrs rsp; if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; goto bail; } memset(&rsp, 0, sizeof(rsp)); pma_get_opa_port_dctrs(ibdev, &rsp); p->port_xmit_data = rsp.port_xmit_data; p->port_rcv_data = rsp.port_rcv_data; p->port_xmit_packets = rsp.port_xmit_pkts; p->port_rcv_packets = rsp.port_rcv_pkts; p->port_unicast_xmit_packets = 0; p->port_unicast_rcv_packets = 0; p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts; p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts; bail: return reply((struct ib_mad_hdr *)pmp); } static void pma_get_opa_port_ectrs(struct ib_device *ibdev, struct _port_ectrs *rsp, u32 port) { u64 tmp, tmp2; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL); tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL); if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) { /* overflow/wrapped */ rsp->link_error_recovery = cpu_to_be32(~0); } else { rsp->link_error_recovery = cpu_to_be32(tmp2); } rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL)); rsp->port_rcv_errors = cpu_to_be64(read_dev_cntr(dd, 
C_DC_RCV_ERR, CNTR_INVALID_VL)); rsp->port_rcv_remote_physical_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL)); rsp->port_rcv_switch_relay_errors = 0; rsp->port_xmit_discards = cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL)); rsp->port_xmit_constraint_errors = cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL)); rsp->port_rcv_constraint_errors = cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL)); rsp->local_link_integrity_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL)); rsp->excessive_buffer_overruns = cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL)); } static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, struct ib_device *ibdev, u32 port, u32 *resp_len) { size_t response_data_size; struct _port_ectrs *rsp; u32 port_num; struct opa_port_error_counters64_msg *req; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 num_ports; u8 num_pslm; u8 num_vls; struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; struct _vls_ectrs *vlinfo; unsigned long vl; u64 port_mask, tmp; unsigned long vl_select_mask; int vfi; req = (struct opa_port_error_counters64_msg *)pmp->data; num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); num_vls = hweight32(be32_to_cpu(req->vl_select_mask)); if (num_ports != 1 || num_ports != num_pslm) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } response_data_size = struct_size(req, port.vls, num_vls); if (response_data_size > sizeof(pmp->data)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } /* * The bit set in the mask needs to be consistent with the * port the request came in on. */ port_mask = be64_to_cpu(req->port_select_mask[3]); port_num = find_first_bit((unsigned long *)&port_mask, sizeof(port_mask) * 8); if (port_num != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } rsp = &req->port; ibp = to_iport(ibdev, port_num); ppd = ppd_from_ibp(ibp); memset(rsp, 0, sizeof(*rsp)); rsp->port_number = port_num; pma_get_opa_port_ectrs(ibdev, rsp, port_num); rsp->port_rcv_remote_physical_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL)); rsp->fm_config_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL)); tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL); rsp->uncorrectable_errors = tmp < 0x100 ? 
(tmp & 0xff) : 0xff; rsp->port_rcv_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL)); vlinfo = &rsp->vls[0]; vfi = 0; vl_select_mask = be32_to_cpu(req->vl_select_mask); for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_discards = cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL, idx_from_vl(vl))); vlinfo += 1; vfi++; } if (resp_len) *resp_len += response_data_size; return reply((struct ib_mad_hdr *)pmp); } static int pma_get_ib_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u32 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct _port_ectrs rsp; u64 temp_link_overrun_errors; u64 temp_64; u32 temp_32; memset(&rsp, 0, sizeof(rsp)); pma_get_opa_port_ectrs(ibdev, &rsp, port); if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; goto bail; } p->symbol_error_counter = 0; /* N/A for OPA */ temp_32 = be32_to_cpu(rsp.link_error_recovery); if (temp_32 > 0xFFUL) p->link_error_recovery_counter = 0xFF; else p->link_error_recovery_counter = (u8)temp_32; temp_32 = be32_to_cpu(rsp.link_downed); if (temp_32 > 0xFFUL) p->link_downed_counter = 0xFF; else p->link_downed_counter = (u8)temp_32; temp_64 = be64_to_cpu(rsp.port_rcv_errors); if (temp_64 > 0xFFFFUL) p->port_rcv_errors = cpu_to_be16(0xFFFF); else p->port_rcv_errors = cpu_to_be16((u16)temp_64); temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors); if (temp_64 > 0xFFFFUL) p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); else p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64); temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors); p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64); temp_64 = be64_to_cpu(rsp.port_xmit_discards); if (temp_64 > 0xFFFFUL) p->port_xmit_discards = cpu_to_be16(0xFFFF); else p->port_xmit_discards = cpu_to_be16((u16)temp_64); temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors); if (temp_64 > 0xFFUL) p->port_xmit_constraint_errors = 0xFF; else p->port_xmit_constraint_errors = (u8)temp_64; temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors); if (temp_64 > 0xFFUL) p->port_rcv_constraint_errors = 0xFFUL; else p->port_rcv_constraint_errors = (u8)temp_64; /* LocalLink: 7:4, BufferOverrun: 3:0 */ temp_64 = be64_to_cpu(rsp.local_link_integrity_errors); if (temp_64 > 0xFUL) temp_64 = 0xFUL; temp_link_overrun_errors = temp_64 << 4; temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns); if (temp_64 > 0xFUL) temp_64 = 0xFUL; temp_link_overrun_errors |= temp_64; p->link_overrun_errors = (u8)temp_link_overrun_errors; p->vl15_dropped = 0; /* N/A for OPA */ bail: return reply((struct ib_mad_hdr *)pmp); } static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, struct ib_device *ibdev, u32 port, u32 *resp_len) { size_t response_data_size; struct _port_ei *rsp; struct opa_port_error_info_msg *req; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u64 port_mask; u32 num_ports; u32 port_num; u8 num_pslm; u64 reg; req = (struct opa_port_error_info_msg *)pmp->data; rsp = &req->port; num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)); num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); memset(rsp, 0, sizeof(*rsp)); if (num_ports != 1 || num_ports != num_pslm) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } /* Sanity check */ response_data_size = sizeof(struct opa_port_error_info_msg); if (response_data_size > sizeof(pmp->data)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 
return reply((struct ib_mad_hdr *)pmp); } /* * The bit set in the mask needs to be consistent with the port * the request came in on. */ port_mask = be64_to_cpu(req->port_select_mask[3]); port_num = find_first_bit((unsigned long *)&port_mask, sizeof(port_mask) * 8); if (port_num != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } rsp->port_number = port; /* PortRcvErrorInfo */ rsp->port_rcv_ei.status_and_code = dd->err_info_rcvport.status_and_code; memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1, &dd->err_info_rcvport.packet_flit1, sizeof(u64)); memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2, &dd->err_info_rcvport.packet_flit2, sizeof(u64)); /* ExcessiverBufferOverrunInfo */ reg = read_csr(dd, RCV_ERR_INFO); if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) { /* * if the RcvExcessBufferOverrun bit is set, save SC of * first pkt that encountered an excess buffer overrun */ u8 tmp = (u8)reg; tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK; tmp <<= 2; rsp->excessive_buffer_overrun_ei.status_and_sc = tmp; /* set the status bit */ rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80; } rsp->port_xmit_constraint_ei.status = dd->err_info_xmit_constraint.status; rsp->port_xmit_constraint_ei.pkey = cpu_to_be16(dd->err_info_xmit_constraint.pkey); rsp->port_xmit_constraint_ei.slid = cpu_to_be32(dd->err_info_xmit_constraint.slid); rsp->port_rcv_constraint_ei.status = dd->err_info_rcv_constraint.status; rsp->port_rcv_constraint_ei.pkey = cpu_to_be16(dd->err_info_rcv_constraint.pkey); rsp->port_rcv_constraint_ei.slid = cpu_to_be32(dd->err_info_rcv_constraint.slid); /* UncorrectableErrorInfo */ rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable; /* FMConfigErrorInfo */ rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig; if (resp_len) *resp_len += response_data_size; return reply((struct ib_mad_hdr *)pmp); } static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, struct ib_device *ibdev, u32 port, u32 *resp_len) { struct opa_clear_port_status *req = (struct opa_clear_port_status *)pmp->data; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u64 portn = be64_to_cpu(req->port_select_mask[3]); u32 counter_select = be32_to_cpu(req->counter_select_mask); unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ unsigned long vl; if ((nports != 1) || (portn != 1 << port)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } /* * only counters returned by pma_get_opa_portstatus() are * handled, so when pma_get_opa_portstatus() gets a fix, * the corresponding change should be made here as well. 
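 *
 * The pattern below is uniform: for every CS_* bit set in counter_select
 * the matching counter is written to zero, e.g. CS_PORT_XMIT_DATA clears
 * C_DC_XMIT_FLITS, and CS_PORT_XMIT_WAIT also resets the xmit-wait shadow
 * state (port_vl_xmit_wait_last / vl_xmit_flit_cnt).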
*/ if (counter_select & CS_PORT_XMIT_DATA) write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0); if (counter_select & CS_PORT_RCV_DATA) write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0); if (counter_select & CS_PORT_XMIT_PKTS) write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0); if (counter_select & CS_PORT_RCV_PKTS) write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0); if (counter_select & CS_PORT_MCAST_XMIT_PKTS) write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0); if (counter_select & CS_PORT_MCAST_RCV_PKTS) write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0); if (counter_select & CS_PORT_XMIT_WAIT) { write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0); ppd->port_vl_xmit_wait_last[C_VL_COUNT] = 0; ppd->vl_xmit_flit_cnt[C_VL_COUNT] = 0; } /* ignore cs_sw_portCongestion for HFIs */ if (counter_select & CS_PORT_RCV_FECN) write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0); if (counter_select & CS_PORT_RCV_BECN) write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0); /* ignore cs_port_xmit_time_cong for HFIs */ /* ignore cs_port_xmit_wasted_bw for now */ /* ignore cs_port_xmit_wait_data for now */ if (counter_select & CS_PORT_RCV_BUBBLE) write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0); /* Only applicable for switch */ /* if (counter_select & CS_PORT_MARK_FECN) * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0); */ if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS) write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0); /* ignore cs_port_rcv_switch_relay_errors for HFIs */ if (counter_select & CS_PORT_XMIT_DISCARDS) write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0); if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS) write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0); if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS) write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0); if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0); if (counter_select & CS_LINK_ERROR_RECOVERY) { write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0); write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0); } if (counter_select & CS_PORT_RCV_ERRORS) write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0); if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) { write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0); dd->rcv_ovfl_cnt = 0; } if (counter_select & CS_FM_CONFIG_ERRORS) write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0); if (counter_select & CS_LINK_DOWNED) write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0); if (counter_select & CS_UNCORRECTABLE_ERRORS) write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0); for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { if (counter_select & CS_PORT_XMIT_DATA) write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0); if (counter_select & CS_PORT_RCV_DATA) write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0); if (counter_select & CS_PORT_XMIT_PKTS) write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0); if (counter_select & CS_PORT_RCV_PKTS) write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0); if (counter_select & CS_PORT_XMIT_WAIT) { write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0); ppd->port_vl_xmit_wait_last[idx_from_vl(vl)] = 0; ppd->vl_xmit_flit_cnt[idx_from_vl(vl)] = 0; } /* sw_port_vl_congestion is 0 for HFIs */ if (counter_select & CS_PORT_RCV_FECN) write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0); if (counter_select & CS_PORT_RCV_BECN) write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0); /* port_vl_xmit_time_cong is 0 
for HFIs */ /* port_vl_xmit_wasted_bw ??? */ /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */ if (counter_select & CS_PORT_RCV_BUBBLE) write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0); /* if (counter_select & CS_PORT_MARK_FECN) * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0); */ if (counter_select & C_SW_XMIT_DSCD_VL) write_port_cntr(ppd, C_SW_XMIT_DSCD_VL, idx_from_vl(vl), 0); } if (resp_len) *resp_len += sizeof(*req); return reply((struct ib_mad_hdr *)pmp); } static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, struct ib_device *ibdev, u32 port, u32 *resp_len) { struct _port_ei *rsp; struct opa_port_error_info_msg *req; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u64 port_mask; u32 num_ports; u32 port_num; u8 num_pslm; u32 error_info_select; req = (struct opa_port_error_info_msg *)pmp->data; rsp = &req->port; num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)); num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); memset(rsp, 0, sizeof(*rsp)); if (num_ports != 1 || num_ports != num_pslm) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } /* * The bit set in the mask needs to be consistent with the port * the request came in on. */ port_mask = be64_to_cpu(req->port_select_mask[3]); port_num = find_first_bit((unsigned long *)&port_mask, sizeof(port_mask) * 8); if (port_num != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } error_info_select = be32_to_cpu(req->error_info_select_mask); /* PortRcvErrorInfo */ if (error_info_select & ES_PORT_RCV_ERROR_INFO) /* turn off status bit */ dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK; /* ExcessiverBufferOverrunInfo */ if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO) /* * status bit is essentially kept in the h/w - bit 5 of * RCV_ERR_INFO */ write_csr(dd, RCV_ERR_INFO, RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK); if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO) dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK; if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO) dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK; /* UncorrectableErrorInfo */ if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO) /* turn off status bit */ dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK; /* FMConfigErrorInfo */ if (error_info_select & ES_FM_CONFIG_ERROR_INFO) /* turn off status bit */ dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK; if (resp_len) *resp_len += sizeof(*req); return reply((struct ib_mad_hdr *)pmp); } struct opa_congestion_info_attr { __be16 congestion_info; u8 control_table_cap; /* Multiple of 64 entry unit CCTs */ u8 congestion_log_length; } __packed; static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct opa_congestion_info_attr *p = (struct opa_congestion_info_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); if (smp_length_check(sizeof(*p), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } p->congestion_info = 0; p->control_table_cap = ppd->cc_max_table_entries; p->congestion_log_length = OPA_CONG_LOG_ELEMS; if (resp_len) *resp_len += sizeof(*p); return reply((struct ib_mad_hdr *)smp); } static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { int i; struct 
opa_congestion_setting_attr *p = (struct opa_congestion_setting_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct opa_congestion_setting_entry_shadow *entries; struct cc_state *cc_state; if (smp_length_check(sizeof(*p), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } rcu_read_lock(); cc_state = get_cc_state(ppd); if (!cc_state) { rcu_read_unlock(); return reply((struct ib_mad_hdr *)smp); } entries = cc_state->cong_setting.entries; p->port_control = cpu_to_be16(cc_state->cong_setting.port_control); p->control_map = cpu_to_be32(cc_state->cong_setting.control_map); for (i = 0; i < OPA_MAX_SLS; i++) { p->entries[i].ccti_increase = entries[i].ccti_increase; p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer); p->entries[i].trigger_threshold = entries[i].trigger_threshold; p->entries[i].ccti_min = entries[i].ccti_min; } rcu_read_unlock(); if (resp_len) *resp_len += sizeof(*p); return reply((struct ib_mad_hdr *)smp); } /* * Apply congestion control information stored in the ppd to the * active structure. */ static void apply_cc_state(struct hfi1_pportdata *ppd) { struct cc_state *old_cc_state, *new_cc_state; new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL); if (!new_cc_state) return; /* * Hold the lock for updating *and* to prevent ppd information * from changing during the update. */ spin_lock(&ppd->cc_state_lock); old_cc_state = get_cc_state_protected(ppd); if (!old_cc_state) { /* never active, or shutting down */ spin_unlock(&ppd->cc_state_lock); kfree(new_cc_state); return; } *new_cc_state = *old_cc_state; if (ppd->total_cct_entry) new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1; else new_cc_state->cct.ccti_limit = 0; memcpy(new_cc_state->cct.entries, ppd->ccti_entries, ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)); new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED; new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map; memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries, OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry)); rcu_assign_pointer(ppd->cc_state, new_cc_state); spin_unlock(&ppd->cc_state_lock); kfree_rcu(old_cc_state, rcu); } static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct opa_congestion_setting_attr *p = (struct opa_congestion_setting_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct opa_congestion_setting_entry_shadow *entries; int i; if (smp_length_check(sizeof(*p), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } /* * Save details from packet into the ppd. Hold the cc_state_lock so * our information is consistent with anyone trying to apply the state. 
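 * (apply_cc_state(), called right after the copy, takes the same lock,
 * clones the active cc_state and publishes it with rcu_assign_pointer(),
 * so readers under rcu_read_lock() never see a half-updated table.)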
*/ spin_lock(&ppd->cc_state_lock); ppd->cc_sl_control_map = be32_to_cpu(p->control_map); entries = ppd->congestion_entries; for (i = 0; i < OPA_MAX_SLS; i++) { entries[i].ccti_increase = p->entries[i].ccti_increase; entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer); entries[i].trigger_threshold = p->entries[i].trigger_threshold; entries[i].ccti_min = p->entries[i].ccti_min; } spin_unlock(&ppd->cc_state_lock); /* now apply the information */ apply_cc_state(ppd); return __subn_get_opa_cong_setting(smp, am, data, ibdev, port, resp_len, max_len); } static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data; u64 ts; int i; if (am || smp_length_check(sizeof(*cong_log), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } spin_lock_irq(&ppd->cc_log_lock); cong_log->log_type = OPA_CC_LOG_TYPE_HFI; cong_log->congestion_flags = 0; cong_log->threshold_event_counter = cpu_to_be16(ppd->threshold_event_counter); memcpy(cong_log->threshold_cong_event_map, ppd->threshold_cong_event_map, sizeof(cong_log->threshold_cong_event_map)); /* keep timestamp in units of 1.024 usec */ ts = ktime_get_ns() / 1024; cong_log->current_time_stamp = cpu_to_be32(ts); for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) { struct opa_hfi1_cong_log_event_internal *cce = &ppd->cc_events[ppd->cc_mad_idx++]; if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS) ppd->cc_mad_idx = 0; /* * Entries which are older than twice the time * required to wrap the counter are supposed to * be zeroed (CA10-49 IBTA, release 1.2.1, V1). */ if ((ts - cce->timestamp) / 2 > U32_MAX) continue; memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3); memcpy(cong_log->events[i].remote_qp_number_cn_entry, &cce->rqpn, 3); cong_log->events[i].sl_svc_type_cn_entry = ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7); cong_log->events[i].remote_lid_cn_entry = cpu_to_be32(cce->rlid); cong_log->events[i].timestamp_cn_entry = cpu_to_be32(cce->timestamp); } /* * Reset threshold_cong_event_map, and threshold_event_counter * to 0 when log is read. 
*/ memset(ppd->threshold_cong_event_map, 0x0, sizeof(ppd->threshold_cong_event_map)); ppd->threshold_event_counter = 0; spin_unlock_irq(&ppd->cc_log_lock); if (resp_len) *resp_len += sizeof(struct opa_hfi1_cong_log); return reply((struct ib_mad_hdr *)smp); } static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct ib_cc_table_attr *cc_table_attr = (struct ib_cc_table_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 start_block = OPA_AM_START_BLK(am); u32 n_blocks = OPA_AM_NBLK(am); struct ib_cc_table_entry_shadow *entries; int i, j; u32 sentry, eentry; struct cc_state *cc_state; u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1); /* sanity check n_blocks, start_block */ if (n_blocks == 0 || smp_length_check(size, max_len) || start_block + n_blocks > ppd->cc_max_table_entries) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } rcu_read_lock(); cc_state = get_cc_state(ppd); if (!cc_state) { rcu_read_unlock(); return reply((struct ib_mad_hdr *)smp); } sentry = start_block * IB_CCT_ENTRIES; eentry = sentry + (IB_CCT_ENTRIES * n_blocks); cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit); entries = cc_state->cct.entries; /* return n_blocks, though the last block may not be full */ for (j = 0, i = sentry; i < eentry; j++, i++) cc_table_attr->ccti_entries[j].entry = cpu_to_be16(entries[i].entry); rcu_read_unlock(); if (resp_len) *resp_len += size; return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 start_block = OPA_AM_START_BLK(am); u32 n_blocks = OPA_AM_NBLK(am); struct ib_cc_table_entry_shadow *entries; int i, j; u32 sentry, eentry; u16 ccti_limit; u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1); /* sanity check n_blocks, start_block */ if (n_blocks == 0 || smp_length_check(size, max_len) || start_block + n_blocks > ppd->cc_max_table_entries) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } sentry = start_block * IB_CCT_ENTRIES; eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) + (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1; /* sanity check ccti_limit */ ccti_limit = be16_to_cpu(p->ccti_limit); if (ccti_limit + 1 > eentry) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } /* * Save details from packet into the ppd. Hold the cc_state_lock so * our information is consistent with anyone trying to apply the state. 
*/ spin_lock(&ppd->cc_state_lock); ppd->total_cct_entry = ccti_limit + 1; entries = ppd->ccti_entries; for (j = 0, i = sentry; i < eentry; j++, i++) entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry); spin_unlock(&ppd->cc_state_lock); /* now apply the information */ apply_cc_state(ppd); return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len, max_len); } struct opa_led_info { __be32 rsvd_led_mask; __be32 rsvd; }; #define OPA_LED_SHIFT 31 #define OPA_LED_MASK BIT(OPA_LED_SHIFT) static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd = dd->pport; struct opa_led_info *p = (struct opa_led_info *)data; u32 nport = OPA_AM_NPORT(am); u32 is_beaconing_active; if (nport != 1 || smp_length_check(sizeof(*p), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } /* * This pairs with the memory barrier in hfi1_start_led_override to * ensure that we read the correct state of LED beaconing represented * by led_override_timer_active */ smp_rmb(); is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active); p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT); if (resp_len) *resp_len += sizeof(struct opa_led_info); return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct opa_led_info *p = (struct opa_led_info *)data; u32 nport = OPA_AM_NPORT(am); int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK); if (nport != 1 || smp_length_check(sizeof(*p), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } if (on) hfi1_start_led_override(dd->pport, 2000, 1500); else shutdown_led_override(dd->pport); return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len, max_len); } static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len) { int ret; struct hfi1_ibport *ibp = to_iport(ibdev, port); switch (attr_id) { case IB_SMP_ATTR_NODE_DESC: ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port, resp_len, max_len); break; case IB_SMP_ATTR_NODE_INFO: ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port, resp_len, max_len); break; case IB_SMP_ATTR_PORT_INFO: ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len, max_len); break; case IB_SMP_ATTR_PKEY_TABLE: ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_SL_TO_SC_MAP: ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_SL_MAP: ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_VLT_MAP: ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_VLNT_MAP: ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_PORT_STATE_INFO: ret = __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE: ret = __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_CABLE_INFO: ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port, resp_len, max_len); break; 
case IB_SMP_ATTR_VL_ARB_TABLE: ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_CONGESTION_INFO: ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING: ret = __subn_get_opa_cong_setting(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_HFI_CONGESTION_LOG: ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE: ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len, max_len); break; case IB_SMP_ATTR_LED_INFO: ret = __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len, max_len); break; case IB_SMP_ATTR_SM_INFO: if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; if (ibp->rvp.port_cap_flags & IB_PORT_SM) return IB_MAD_RESULT_SUCCESS; fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)smp); break; } return ret; } static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u32 port, u32 *resp_len, u32 max_len, int local_mad) { int ret; struct hfi1_ibport *ibp = to_iport(ibdev, port); switch (attr_id) { case IB_SMP_ATTR_PORT_INFO: ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port, resp_len, max_len, local_mad); break; case IB_SMP_ATTR_PKEY_TABLE: ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_SL_TO_SC_MAP: ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_SL_MAP: ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_VLT_MAP: ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_VLNT_MAP: ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_PORT_STATE_INFO: ret = __subn_set_opa_psi(smp, am, data, ibdev, port, resp_len, max_len, local_mad); break; case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE: ret = __subn_set_opa_bct(smp, am, data, ibdev, port, resp_len, max_len); break; case IB_SMP_ATTR_VL_ARB_TABLE: ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING: ret = __subn_set_opa_cong_setting(smp, am, data, ibdev, port, resp_len, max_len); break; case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE: ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port, resp_len, max_len); break; case IB_SMP_ATTR_LED_INFO: ret = __subn_set_opa_led_info(smp, am, data, ibdev, port, resp_len, max_len); break; case IB_SMP_ATTR_SM_INFO: if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; if (ibp->rvp.port_cap_flags & IB_PORT_SM) return IB_MAD_RESULT_SUCCESS; fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)smp); break; } return ret; } static inline void set_aggr_error(struct opa_aggregate *ag) { ag->err_reqlength |= cpu_to_be16(0x8000); } static int subn_get_opa_aggregate(struct opa_smp *smp, struct ib_device *ibdev, u32 port, u32 *resp_len) { int i; u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff; u8 *next_smp = opa_get_smp_data(smp); if (num_attr < 1 || num_attr > 117) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < num_attr; i++) { 
struct opa_aggregate *agg; size_t agg_data_len; size_t agg_size; u32 am; agg = (struct opa_aggregate *)next_smp; agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8; agg_size = sizeof(*agg) + agg_data_len; am = be32_to_cpu(agg->attr_mod); *resp_len += agg_size; if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } /* zero the payload for this segment */ memset(next_smp + sizeof(*agg), 0, agg_data_len); (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data, ibdev, port, NULL, (u32)agg_data_len); if (smp->status & IB_SMP_INVALID_FIELD) break; if (smp->status & ~IB_SMP_DIRECTION) { set_aggr_error(agg); return reply((struct ib_mad_hdr *)smp); } next_smp += agg_size; } return reply((struct ib_mad_hdr *)smp); } static int subn_set_opa_aggregate(struct opa_smp *smp, struct ib_device *ibdev, u32 port, u32 *resp_len, int local_mad) { int i; u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff; u8 *next_smp = opa_get_smp_data(smp); if (num_attr < 1 || num_attr > 117) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < num_attr; i++) { struct opa_aggregate *agg; size_t agg_data_len; size_t agg_size; u32 am; agg = (struct opa_aggregate *)next_smp; agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8; agg_size = sizeof(*agg) + agg_data_len; am = be32_to_cpu(agg->attr_mod); *resp_len += agg_size; if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data, ibdev, port, NULL, (u32)agg_data_len, local_mad); if (smp->status & IB_SMP_INVALID_FIELD) break; if (smp->status & ~IB_SMP_DIRECTION) { set_aggr_error(agg); return reply((struct ib_mad_hdr *)smp); } next_smp += agg_size; } return reply((struct ib_mad_hdr *)smp); } /* * OPAv1 specifies that, on the transition to link up, these counters * are cleared: * PortRcvErrors [*] * LinkErrorRecovery * LocalLinkIntegrityErrors * ExcessiveBufferOverruns [*] * * [*] Error info associated with these counters is retained, but the * error info status is reset to 0. */ void clear_linkup_counters(struct hfi1_devdata *dd) { /* PortRcvErrors */ write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0); dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK; /* LinkErrorRecovery */ write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0); write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0); /* LocalLinkIntegrityErrors */ write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0); /* ExcessiveBufferOverruns */ write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0); dd->rcv_ovfl_cnt = 0; dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK; } static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp) { unsigned int i; struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) if (ppd->pkeys[i] == FULL_MGMT_P_KEY) return 1; return 0; } /* * is_local_mad() returns 1 if 'mad' is sent from, and destined to the * local node, 0 otherwise. 
*/ static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad, const struct ib_wc *in_wc) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); const struct opa_smp *smp = (const struct opa_smp *)mad; if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { return (smp->hop_cnt == 0 && smp->route.dr.dr_slid == OPA_LID_PERMISSIVE && smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE); } return (in_wc->slid == ppd->lid); } /* * opa_local_smp_check() should only be called on MADs for which * is_local_mad() returns true. It applies the SMP checks that are * specific to SMPs which are sent from, and destined to this node. * opa_local_smp_check() returns 0 if the SMP passes its checks, 1 * otherwise. * * SMPs which arrive from other nodes are instead checked by * opa_smp_check(). */ static int opa_local_smp_check(struct hfi1_ibport *ibp, const struct ib_wc *in_wc) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u16 pkey; if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys)) return 1; pkey = ppd->pkeys[in_wc->pkey_index]; /* * We need to do the "node-local" checks specified in OPAv1, * rev 0.90, section 9.10.26, which are: * - pkey is 0x7fff, or 0xffff * - Source QPN == 0 || Destination QPN == 0 * - the MAD header's management class is either * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or * IB_MGMT_CLASS_SUBN_LID_ROUTED * - SLID != 0 * * However, we know (and so don't need to check again) that, * for local SMPs, the MAD stack passes MADs with: * - Source QPN of 0 * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or * our own port's lid * */ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) return 0; ingress_pkey_table_fail(ppd, pkey, in_wc->slid); return 1; } /** * hfi1_pkey_validation_pma - It validates PKEYs for incoming PMA MAD packets. * @ibp: IB port data * @in_mad: MAD packet with header and data * @in_wc: Work completion data such as source LID, port number, etc. * * These are all the possible logic rules for validating a pkey: * * a) If pkey neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY, * and NOT self-originated packet: * Drop MAD packet as it should always be part of the * management partition unless it's a self-originated packet. * * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table: * The packet is coming from a management node and the receiving node * is also a management node, so it is safe for the packet to go through. * * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table: * Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table. * It could be an FM misconfiguration. * * d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table: * It is safe for the packet to go through since a non-management node is * talking to another non-management node. * * e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table: * Drop the packet because a non-management node is talking to a * management node, and it could be an attack. * * For the implementation, these rules can be simplied to only checking * for (a) and (e). There's no need to check for rule (b) as * the packet doesn't need to be dropped. Rule (c) is not possible in * the driver as LIM_MGMT_P_KEY is always in the pkey table. 
* * Return: * 0 - pkey is okay, -EINVAL it's a bad pkey */ static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp, const struct opa_mad *in_mad, const struct ib_wc *in_wc) { u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index); /* Rule (a) from above */ if (!is_local_mad(ibp, in_mad, in_wc) && pkey_value != LIM_MGMT_P_KEY && pkey_value != FULL_MGMT_P_KEY) return -EINVAL; /* Rule (e) from above */ if (pkey_value == LIM_MGMT_P_KEY && is_full_mgmt_pkey_in_table(ibp)) return -EINVAL; return 0; } static int process_subn_opa(struct ib_device *ibdev, int mad_flags, u32 port, const struct opa_mad *in_mad, struct opa_mad *out_mad, u32 *resp_len, int local_mad) { struct opa_smp *smp = (struct opa_smp *)out_mad; struct hfi1_ibport *ibp = to_iport(ibdev, port); u8 *data; u32 am, data_size; __be16 attr_id; int ret; *out_mad = *in_mad; data = opa_get_smp_data(smp); data_size = (u32)opa_get_smp_data_size(smp); am = be32_to_cpu(smp->attr_mod); attr_id = smp->attr_id; if (smp->class_version != OPA_SM_CLASS_VERSION) { smp->status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_mad_hdr *)smp); return ret; } ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey, smp->route.dr.dr_slid, smp->route.dr.return_path, smp->hop_cnt); if (ret) { u32 port_num = be32_to_cpu(smp->attr_mod); /* * If this is a get/set portinfo, we already check the * M_Key if the MAD is for another port and the M_Key * is OK on the receiving port. This check is needed * to increment the error counters when the M_Key * fails to match on *both* ports. */ if (attr_id == IB_SMP_ATTR_PORT_INFO && (smp->method == IB_MGMT_METHOD_GET || smp->method == IB_MGMT_METHOD_SET) && port_num && port_num <= ibdev->phys_port_cnt && port != port_num) (void)check_mkey(to_iport(ibdev, port_num), (struct ib_mad_hdr *)smp, 0, smp->mkey, smp->route.dr.dr_slid, smp->route.dr.return_path, smp->hop_cnt); ret = IB_MAD_RESULT_FAILURE; return ret; } *resp_len = opa_get_smp_header_size(smp); switch (smp->method) { case IB_MGMT_METHOD_GET: switch (attr_id) { default: clear_opa_smp_data(smp); ret = subn_get_opa_sma(attr_id, smp, am, data, ibdev, port, resp_len, data_size); break; case OPA_ATTRIB_ID_AGGREGATE: ret = subn_get_opa_aggregate(smp, ibdev, port, resp_len); break; } break; case IB_MGMT_METHOD_SET: switch (attr_id) { default: ret = subn_set_opa_sma(attr_id, smp, am, data, ibdev, port, resp_len, data_size, local_mad); break; case OPA_ATTRIB_ID_AGGREGATE: ret = subn_set_opa_aggregate(smp, ibdev, port, resp_len, local_mad); break; } break; case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_REPORT: case IB_MGMT_METHOD_REPORT_RESP: case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. 
*/ ret = IB_MAD_RESULT_SUCCESS; break; case IB_MGMT_METHOD_TRAP_REPRESS: subn_handle_opa_trap_repress(ibp, smp); /* Always successful */ ret = IB_MAD_RESULT_SUCCESS; break; default: smp->status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_mad_hdr *)smp); break; } return ret; } static int process_subn(struct ib_device *ibdev, int mad_flags, u32 port, const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_smp *smp = (struct ib_smp *)out_mad; struct hfi1_ibport *ibp = to_iport(ibdev, port); int ret; *out_mad = *in_mad; if (smp->class_version != 1) { smp->status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_mad_hdr *)smp); return ret; } ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey, (__force __be32)smp->dr_slid, smp->return_path, smp->hop_cnt); if (ret) { u32 port_num = be32_to_cpu(smp->attr_mod); /* * If this is a get/set portinfo, we already check the * M_Key if the MAD is for another port and the M_Key * is OK on the receiving port. This check is needed * to increment the error counters when the M_Key * fails to match on *both* ports. */ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && (smp->method == IB_MGMT_METHOD_GET || smp->method == IB_MGMT_METHOD_SET) && port_num && port_num <= ibdev->phys_port_cnt && port != port_num) (void)check_mkey(to_iport(ibdev, port_num), (struct ib_mad_hdr *)smp, 0, smp->mkey, (__force __be32)smp->dr_slid, smp->return_path, smp->hop_cnt); ret = IB_MAD_RESULT_FAILURE; return ret; } switch (smp->method) { case IB_MGMT_METHOD_GET: switch (smp->attr_id) { case IB_SMP_ATTR_NODE_INFO: ret = subn_get_nodeinfo(smp, ibdev, port); break; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)smp); break; } break; } return ret; } static int process_perf(struct ib_device *ibdev, u32 port, const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad; struct ib_class_port_info *cpi = (struct ib_class_port_info *) &pmp->data; int ret = IB_MAD_RESULT_FAILURE; *out_mad = *in_mad; if (pmp->mad_hdr.class_version != 1) { pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_mad_hdr *)pmp); return ret; } switch (pmp->mad_hdr.method) { case IB_MGMT_METHOD_GET: switch (pmp->mad_hdr.attr_id) { case IB_PMA_PORT_COUNTERS: ret = pma_get_ib_portcounters(pmp, ibdev, port); break; case IB_PMA_PORT_COUNTERS_EXT: ret = pma_get_ib_portcounters_ext(pmp, ibdev, port); break; case IB_PMA_CLASS_PORT_INFO: cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; ret = reply((struct ib_mad_hdr *)pmp); break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)pmp); break; } break; case IB_MGMT_METHOD_SET: if (pmp->mad_hdr.attr_id) { pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)pmp); } break; case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. 
*/ ret = IB_MAD_RESULT_SUCCESS; break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_mad_hdr *)pmp); break; } return ret; } static int process_perf_opa(struct ib_device *ibdev, u32 port, const struct opa_mad *in_mad, struct opa_mad *out_mad, u32 *resp_len) { struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad; int ret; *out_mad = *in_mad; if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) { pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; return reply((struct ib_mad_hdr *)pmp); } *resp_len = sizeof(pmp->mad_hdr); switch (pmp->mad_hdr.method) { case IB_MGMT_METHOD_GET: switch (pmp->mad_hdr.attr_id) { case IB_PMA_CLASS_PORT_INFO: ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len); break; case OPA_PM_ATTRIB_ID_PORT_STATUS: ret = pma_get_opa_portstatus(pmp, ibdev, port, resp_len); break; case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS: ret = pma_get_opa_datacounters(pmp, ibdev, port, resp_len); break; case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS: ret = pma_get_opa_porterrors(pmp, ibdev, port, resp_len); break; case OPA_PM_ATTRIB_ID_ERROR_INFO: ret = pma_get_opa_errorinfo(pmp, ibdev, port, resp_len); break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)pmp); break; } break; case IB_MGMT_METHOD_SET: switch (pmp->mad_hdr.attr_id) { case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS: ret = pma_set_opa_portstatus(pmp, ibdev, port, resp_len); break; case OPA_PM_ATTRIB_ID_ERROR_INFO: ret = pma_set_opa_errorinfo(pmp, ibdev, port, resp_len); break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)pmp); break; } break; case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. 
*/ ret = IB_MAD_RESULT_SUCCESS; break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_mad_hdr *)pmp); break; } return ret; } static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags, u32 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct opa_mad *in_mad, struct opa_mad *out_mad, size_t *out_mad_size, u16 *out_mad_pkey_index) { int ret; int pkey_idx; int local_mad = 0; u32 resp_len = in_wc->byte_len - sizeof(*in_grh); struct hfi1_ibport *ibp = to_iport(ibdev, port); pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY); if (pkey_idx < 0) { pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n", hfi1_get_pkey(ibp, 1)); pkey_idx = 1; } *out_mad_pkey_index = (u16)pkey_idx; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_LID_ROUTED: local_mad = is_local_mad(ibp, in_mad, in_wc); if (local_mad) { ret = opa_local_smp_check(ibp, in_wc); if (ret) return IB_MAD_RESULT_FAILURE; } ret = process_subn_opa(ibdev, mad_flags, port, in_mad, out_mad, &resp_len, local_mad); goto bail; case IB_MGMT_CLASS_PERF_MGMT: ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc); if (ret) return IB_MAD_RESULT_FAILURE; ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len); goto bail; default: ret = IB_MAD_RESULT_SUCCESS; } bail: if (ret & IB_MAD_RESULT_REPLY) *out_mad_size = round_up(resp_len, 8); else if (ret & IB_MAD_RESULT_SUCCESS) *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh); return ret; } static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u32 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad *in_mad, struct ib_mad *out_mad) { int ret; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_LID_ROUTED: ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad); break; case IB_MGMT_CLASS_PERF_MGMT: ret = process_perf(ibdev, port, in_mad, out_mad); break; default: ret = IB_MAD_RESULT_SUCCESS; break; } return ret; } /** * hfi1_process_mad - process an incoming MAD packet * @ibdev: the infiniband device this packet came in on * @mad_flags: MAD flags * @port: the port number this packet came in on * @in_wc: the work completion entry for this packet * @in_grh: the global route header for this packet * @in_mad: the incoming MAD * @out_mad: any outgoing MAD reply * @out_mad_size: size of the outgoing MAD reply * @out_mad_pkey_index: used to apss back the packet key index * * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not * interested in processing. * * Note that the verbs framework has already done the MAD sanity checks, * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE * MADs. * * This is called by the ib_mad module. */ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad *in_mad, struct ib_mad *out_mad, size_t *out_mad_size, u16 *out_mad_pkey_index) { switch (in_mad->mad_hdr.base_version) { case OPA_MGMT_BASE_VERSION: return hfi1_process_opa_mad(ibdev, mad_flags, port, in_wc, in_grh, (struct opa_mad *)in_mad, (struct opa_mad *)out_mad, out_mad_size, out_mad_pkey_index); case IB_MGMT_BASE_VERSION: return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc, in_grh, in_mad, out_mad); default: break; } return IB_MAD_RESULT_FAILURE; }
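/*
 * Editor's note: the block below is an illustrative, standalone userspace
 * sketch, not part of mad.c. It restates how subn_get_opa_aggregate() and
 * set_aggr_error() above interpret the aggregate err_reqlength field: the
 * low 7 bits give the attribute payload length in 8-byte units, and bit 15
 * flags an error on that segment. Values here are in host byte order (the
 * driver converts from the wire's big-endian with be16_to_cpu()); the
 * helper names and sample numbers are made up for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

/* payload length in bytes encoded in a host-order err_reqlength */
static size_t agg_payload_len(uint16_t err_reqlength)
{
	return (size_t)(err_reqlength & 0x007f) * 8;
}

/* mark the segment as failed, mirroring set_aggr_error() */
static uint16_t agg_mark_error(uint16_t err_reqlength)
{
	return err_reqlength | 0x8000;
}

int main(void)
{
	uint16_t reqlen = 0x0003;	/* hypothetical: 3 * 8 = 24 payload bytes */

	printf("payload bytes: %zu\n", agg_payload_len(reqlen));	/* 24 */
	printf("with error bit: 0x%04x\n", agg_mark_error(reqlen));	/* 0x8003 */
	return 0;
}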
linux-master
drivers/infiniband/hw/hfi1/mad.c
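/*
 * Editor's note: a standalone userspace sketch relating to
 * hfi1_pkey_validation_pma() in mad.c above, not part of either driver
 * file. It restates only rules (a) and (e) from that function's comment:
 * a non-management pkey is rejected unless the MAD is self-originated,
 * and a limited-management pkey is rejected when the full management
 * pkey is present in the local table. The pkey values mirror the
 * 0x7fff/0xffff management pkeys referenced in opa_local_smp_check();
 * the helper name and return convention here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FULL_MGMT_P_KEY 0xffff
#define LIM_MGMT_P_KEY  0x7fff

static int validate_pma_pkey(uint16_t pkey, bool local_mad,
			     bool full_mgmt_pkey_in_table)
{
	/* Rule (a): a non-management pkey is only allowed for local MADs */
	if (!local_mad && pkey != LIM_MGMT_P_KEY && pkey != FULL_MGMT_P_KEY)
		return -1;

	/* Rule (e): limited-management sender talking to a management node */
	if (pkey == LIM_MGMT_P_KEY && full_mgmt_pkey_in_table)
		return -1;

	return 0;
}

int main(void)
{
	/* limited pkey arriving at a node holding the full mgmt pkey: drop */
	printf("%d\n", validate_pma_pkey(LIM_MGMT_P_KEY, false, true));
	/* full mgmt pkey from a remote node: accept */
	printf("%d\n", validate_pma_pkey(FULL_MGMT_P_KEY, false, false));
	return 0;
}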
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2018 Intel Corporation. */ #include <linux/spinlock.h> #include <linux/seqlock.h> #include <linux/netdevice.h> #include <linux/moduleparam.h> #include <linux/bitops.h> #include <linux/timer.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include "hfi.h" #include "common.h" #include "qp.h" #include "sdma.h" #include "iowait.h" #include "trace.h" /* must be a power of 2 >= 64 <= 32768 */ #define SDMA_DESCQ_CNT 2048 #define SDMA_DESC_INTR 64 #define INVALID_TAIL 0xffff #define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32)) static uint sdma_descq_cnt = SDMA_DESCQ_CNT; module_param(sdma_descq_cnt, uint, S_IRUGO); MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries"); static uint sdma_idle_cnt = 250; module_param(sdma_idle_cnt, uint, S_IRUGO); MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)"); uint mod_num_sdma; module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO); MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use"); static uint sdma_desct_intr = SDMA_DESC_INTR; module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptor before interrupt"); #define SDMA_WAIT_BATCH_SIZE 20 /* max wait time for a SDMA engine to indicate it has halted */ #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */ /* all SDMA engine errors that cause a halt */ #define SD(name) SEND_DMA_##name #define ALL_SDMA_ENG_HALT_ERRS \ (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK)) /* sdma_sendctrl operations */ #define SDMA_SENDCTRL_OP_ENABLE BIT(0) #define SDMA_SENDCTRL_OP_INTENABLE BIT(1) #define SDMA_SENDCTRL_OP_HALT BIT(2) #define SDMA_SENDCTRL_OP_CLEANUP BIT(3) /* handle long defines */ #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \ SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \ SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT static const char * const sdma_state_names[] = { [sdma_state_s00_hw_down] = "s00_HwDown", [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait", [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait", [sdma_state_s20_idle] = "s20_Idle", [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait", [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait", [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait", [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait", [sdma_state_s80_hw_freeze] = "s80_HwFreeze", [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean", [sdma_state_s99_running] = "s99_Running", }; #ifdef CONFIG_SDMA_VERBOSITY static const 
char * const sdma_event_names[] = { [sdma_event_e00_go_hw_down] = "e00_GoHwDown", [sdma_event_e10_go_hw_start] = "e10_GoHwStart", [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone", [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone", [sdma_event_e30_go_running] = "e30_GoRunning", [sdma_event_e40_sw_cleaned] = "e40_SwCleaned", [sdma_event_e50_hw_cleaned] = "e50_HwCleaned", [sdma_event_e60_hw_halted] = "e60_HwHalted", [sdma_event_e70_go_idle] = "e70_GoIdle", [sdma_event_e80_hw_freeze] = "e80_HwFreeze", [sdma_event_e81_hw_frozen] = "e81_HwFrozen", [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze", [sdma_event_e85_link_down] = "e85_LinkDown", [sdma_event_e90_sw_halted] = "e90_SwHalted", }; #endif static const struct sdma_set_state_action sdma_action_table[] = { [sdma_state_s00_hw_down] = { .go_s99_running_tofalse = 1, .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s10_hw_start_up_halt_wait] = { .op_enable = 0, .op_intenable = 0, .op_halt = 1, .op_cleanup = 0, }, [sdma_state_s15_hw_start_up_clean_wait] = { .op_enable = 0, .op_intenable = 1, .op_halt = 0, .op_cleanup = 1, }, [sdma_state_s20_idle] = { .op_enable = 0, .op_intenable = 1, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s30_sw_clean_up_wait] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s40_hw_clean_up_wait] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 1, }, [sdma_state_s50_hw_halt_wait] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s60_idle_halt_wait] = { .go_s99_running_tofalse = 1, .op_enable = 0, .op_intenable = 0, .op_halt = 1, .op_cleanup = 0, }, [sdma_state_s80_hw_freeze] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s82_freeze_sw_clean] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s99_running] = { .op_enable = 1, .op_intenable = 1, .op_halt = 0, .op_cleanup = 0, .go_s99_running_totrue = 1, }, }; #define SDMA_TAIL_UPDATE_THRESH 0x1F /* declare all statics here rather than keep sorting */ static void sdma_complete(struct kref *); static void sdma_finalput(struct sdma_state *); static void sdma_get(struct sdma_state *); static void sdma_hw_clean_up_task(struct tasklet_struct *); static void sdma_put(struct sdma_state *); static void sdma_set_state(struct sdma_engine *, enum sdma_states); static void sdma_start_hw_clean_up(struct sdma_engine *); static void sdma_sw_clean_up_task(struct tasklet_struct *); static void sdma_sendctrl(struct sdma_engine *, unsigned); static void init_sdma_regs(struct sdma_engine *, u32, uint); static void sdma_process_event( struct sdma_engine *sde, enum sdma_events event); static void __sdma_process_event( struct sdma_engine *sde, enum sdma_events event); static void dump_sdma_state(struct sdma_engine *sde); static void sdma_make_progress(struct sdma_engine *sde, u64 status); static void sdma_desc_avail(struct sdma_engine *sde, uint avail); static void sdma_flush_descq(struct sdma_engine *sde); /** * sdma_state_name() - return state string from enum * @state: state */ static const char *sdma_state_name(enum sdma_states state) { return sdma_state_names[state]; } static void sdma_get(struct sdma_state *ss) { kref_get(&ss->kref); } static void sdma_complete(struct kref *kref) { struct sdma_state *ss = container_of(kref, struct sdma_state, kref); complete(&ss->comp); } static void sdma_put(struct sdma_state *ss) { kref_put(&ss->kref, sdma_complete); } static void sdma_finalput(struct sdma_state 
*ss) { sdma_put(ss); wait_for_completion(&ss->comp); } static inline void write_sde_csr( struct sdma_engine *sde, u32 offset0, u64 value) { write_kctxt_csr(sde->dd, sde->this_idx, offset0, value); } static inline u64 read_sde_csr( struct sdma_engine *sde, u32 offset0) { return read_kctxt_csr(sde->dd, sde->this_idx, offset0); } /* * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for * sdma engine 'sde' to drop to 0. */ static void sdma_wait_for_packet_egress(struct sdma_engine *sde, int pause) { u64 off = 8 * sde->this_idx; struct hfi1_devdata *dd = sde->dd; int lcnt = 0; u64 reg_prev; u64 reg = 0; while (1) { reg_prev = reg; reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS); reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK; reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT; if (reg == 0) break; /* counter is reest if accupancy count changes */ if (reg != reg_prev) lcnt = 0; if (lcnt++ > 500) { /* timed out - bounce the link */ dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n", __func__, sde->this_idx, (u32)reg); queue_work(dd->pport->link_wq, &dd->pport->link_bounce_work); break; } udelay(1); } } /* * sdma_wait() - wait for packet egress to complete for all SDMA engines, * and pause for credit return. */ void sdma_wait(struct hfi1_devdata *dd) { int i; for (i = 0; i < dd->num_sdma; i++) { struct sdma_engine *sde = &dd->per_sdma[i]; sdma_wait_for_packet_egress(sde, 0); } } static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt) { u64 reg; if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT)) return; reg = cnt; reg &= SD(DESC_CNT_CNT_MASK); reg <<= SD(DESC_CNT_CNT_SHIFT); write_sde_csr(sde, SD(DESC_CNT), reg); } static inline void complete_tx(struct sdma_engine *sde, struct sdma_txreq *tx, int res) { /* protect against complete modifying */ struct iowait *wait = tx->wait; callback_t complete = tx->complete; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER trace_hfi1_sdma_out_sn(sde, tx->sn); if (WARN_ON_ONCE(sde->head_sn != tx->sn)) dd_dev_err(sde->dd, "expected %llu got %llu\n", sde->head_sn, tx->sn); sde->head_sn++; #endif __sdma_txclean(sde->dd, tx); if (complete) (*complete)(tx, res); if (iowait_sdma_dec(wait)) iowait_drain_wakeup(wait); } /* * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status * * Depending on timing there can be txreqs in two places: * - in the descq ring * - in the flush list * * To avoid ordering issues the descq ring needs to be flushed * first followed by the flush list. 
* * This routine is called from two places * - From a work queue item * - Directly from the state machine just before setting the * state to running * * Must be called with head_lock held * */ static void sdma_flush(struct sdma_engine *sde) { struct sdma_txreq *txp, *txp_next; LIST_HEAD(flushlist); unsigned long flags; uint seq; /* flush from head to tail */ sdma_flush_descq(sde); spin_lock_irqsave(&sde->flushlist_lock, flags); /* copy flush list */ list_splice_init(&sde->flushlist, &flushlist); spin_unlock_irqrestore(&sde->flushlist_lock, flags); /* flush from flush list */ list_for_each_entry_safe(txp, txp_next, &flushlist, list) complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); /* wakeup QPs orphaned on the dmawait list */ do { struct iowait *w, *nw; seq = read_seqbegin(&sde->waitlock); if (!list_empty(&sde->dmawait)) { write_seqlock(&sde->waitlock); list_for_each_entry_safe(w, nw, &sde->dmawait, list) { if (w->wakeup) { w->wakeup(w, SDMA_AVAIL_REASON); list_del_init(&w->list); } } write_sequnlock(&sde->waitlock); } } while (read_seqretry(&sde->waitlock, seq)); } /* * Fields a work request for flushing the descq ring * and the flush list * * If the engine has been brought to running during * the scheduling delay, the flush is ignored, assuming * that the process of bringing the engine to running * would have done this flush prior to going to running. * */ static void sdma_field_flush(struct work_struct *work) { unsigned long flags; struct sdma_engine *sde = container_of(work, struct sdma_engine, flush_worker); write_seqlock_irqsave(&sde->head_lock, flags); if (!__sdma_running(sde)) sdma_flush(sde); write_sequnlock_irqrestore(&sde->head_lock, flags); } static void sdma_err_halt_wait(struct work_struct *work) { struct sdma_engine *sde = container_of(work, struct sdma_engine, err_halt_worker); u64 statuscsr; unsigned long timeout; timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT); while (1) { statuscsr = read_sde_csr(sde, SD(STATUS)); statuscsr &= SD(STATUS_ENG_HALTED_SMASK); if (statuscsr) break; if (time_after(jiffies, timeout)) { dd_dev_err(sde->dd, "SDMA engine %d - timeout waiting for engine to halt\n", sde->this_idx); /* * Continue anyway. This could happen if there was * an uncorrectable error in the wrong spot. 
*/ break; } usleep_range(80, 120); } sdma_process_event(sde, sdma_event_e15_hw_halt_done); } static void sdma_err_progress_check_schedule(struct sdma_engine *sde) { if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) { unsigned index; struct hfi1_devdata *dd = sde->dd; for (index = 0; index < dd->num_sdma; index++) { struct sdma_engine *curr_sdma = &dd->per_sdma[index]; if (curr_sdma != sde) curr_sdma->progress_check_head = curr_sdma->descq_head; } dd_dev_err(sde->dd, "SDMA engine %d - check scheduled\n", sde->this_idx); mod_timer(&sde->err_progress_check_timer, jiffies + 10); } } static void sdma_err_progress_check(struct timer_list *t) { unsigned index; struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer); dd_dev_err(sde->dd, "SDE progress check event\n"); for (index = 0; index < sde->dd->num_sdma; index++) { struct sdma_engine *curr_sde = &sde->dd->per_sdma[index]; unsigned long flags; /* check progress on each engine except the current one */ if (curr_sde == sde) continue; /* * We must lock interrupts when acquiring sde->lock, * to avoid a deadlock if interrupt triggers and spins on * the same lock on same CPU */ spin_lock_irqsave(&curr_sde->tail_lock, flags); write_seqlock(&curr_sde->head_lock); /* skip non-running queues */ if (curr_sde->state.current_state != sdma_state_s99_running) { write_sequnlock(&curr_sde->head_lock); spin_unlock_irqrestore(&curr_sde->tail_lock, flags); continue; } if ((curr_sde->descq_head != curr_sde->descq_tail) && (curr_sde->descq_head == curr_sde->progress_check_head)) __sdma_process_event(curr_sde, sdma_event_e90_sw_halted); write_sequnlock(&curr_sde->head_lock); spin_unlock_irqrestore(&curr_sde->tail_lock, flags); } schedule_work(&sde->err_halt_worker); } static void sdma_hw_clean_up_task(struct tasklet_struct *t) { struct sdma_engine *sde = from_tasklet(sde, t, sdma_hw_clean_up_task); u64 statuscsr; while (1) { #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif statuscsr = read_sde_csr(sde, SD(STATUS)); statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK); if (statuscsr) break; udelay(10); } sdma_process_event(sde, sdma_event_e25_hw_clean_up_done); } static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde) { return sde->tx_ring[sde->tx_head & sde->sdma_mask]; } /* * flush ring for recovery */ static void sdma_flush_descq(struct sdma_engine *sde) { u16 head, tail; int progress = 0; struct sdma_txreq *txp = get_txhead(sde); /* The reason for some of the complexity of this code is that * not all descriptors have corresponding txps. So, we have to * be able to skip over descs until we wander into the range of * the next txp on the list. 
*/ head = sde->descq_head & sde->sdma_mask; tail = sde->descq_tail & sde->sdma_mask; while (head != tail) { /* advance head, wrap if needed */ head = ++sde->descq_head & sde->sdma_mask; /* if now past this txp's descs, do the callback */ if (txp && txp->next_descq_idx == head) { /* remove from list */ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); trace_hfi1_sdma_progress(sde, head, tail, txp); txp = get_txhead(sde); } progress++; } if (progress) sdma_desc_avail(sde, sdma_descq_freecnt(sde)); } static void sdma_sw_clean_up_task(struct tasklet_struct *t) { struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task); unsigned long flags; spin_lock_irqsave(&sde->tail_lock, flags); write_seqlock(&sde->head_lock); /* * At this point, the following should always be true: * - We are halted, so no more descriptors are getting retired. * - We are not running, so no one is submitting new work. * - Only we can send the e40_sw_cleaned, so we can't start * running again until we say so. So, the active list and * descq are ours to play with. */ /* * In the error clean up sequence, software clean must be called * before the hardware clean so we can use the hardware head in * the progress routine. A hardware clean or SPC unfreeze will * reset the hardware head. * * Process all retired requests. The progress routine will use the * latest physical hardware head - we are not running so speed does * not matter. */ sdma_make_progress(sde, 0); sdma_flush(sde); /* * Reset our notion of head and tail. * Note that the HW registers have been reset via an earlier * clean up. */ sde->descq_tail = 0; sde->descq_head = 0; sde->desc_avail = sdma_descq_freecnt(sde); *sde->head_dma = 0; __sdma_process_event(sde, sdma_event_e40_sw_cleaned); write_sequnlock(&sde->head_lock); spin_unlock_irqrestore(&sde->tail_lock, flags); } static void sdma_sw_tear_down(struct sdma_engine *sde) { struct sdma_state *ss = &sde->state; /* Releasing this reference means the state machine has stopped. */ sdma_put(ss); /* stop waiting for all unfreeze events to complete */ atomic_set(&sde->dd->sdma_unfreeze_count, -1); wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); } static void sdma_start_hw_clean_up(struct sdma_engine *sde) { tasklet_hi_schedule(&sde->sdma_hw_clean_up_task); } static void sdma_set_state(struct sdma_engine *sde, enum sdma_states next_state) { struct sdma_state *ss = &sde->state; const struct sdma_set_state_action *action = sdma_action_table; unsigned op = 0; trace_hfi1_sdma_state( sde, sdma_state_names[ss->current_state], sdma_state_names[next_state]); /* debugging bookkeeping */ ss->previous_state = ss->current_state; ss->previous_op = ss->current_op; ss->current_state = next_state; if (ss->previous_state != sdma_state_s99_running && next_state == sdma_state_s99_running) sdma_flush(sde); if (action[next_state].op_enable) op |= SDMA_SENDCTRL_OP_ENABLE; if (action[next_state].op_intenable) op |= SDMA_SENDCTRL_OP_INTENABLE; if (action[next_state].op_halt) op |= SDMA_SENDCTRL_OP_HALT; if (action[next_state].op_cleanup) op |= SDMA_SENDCTRL_OP_CLEANUP; if (action[next_state].go_s99_running_tofalse) ss->go_s99_running = 0; if (action[next_state].go_s99_running_totrue) ss->go_s99_running = 1; ss->current_op = op; sdma_sendctrl(sde, ss->current_op); } /** * sdma_get_descq_cnt() - called when device probed * * Return a validated descq count. * * This is currently only used in the verbs initialization to build the tx * list. 
* * This will probably be deleted in favor of a more scalable approach to * alloc tx's. * */ u16 sdma_get_descq_cnt(void) { u16 count = sdma_descq_cnt; if (!count) return SDMA_DESCQ_CNT; /* count must be a power of 2 greater than 64 and less than * 32768. Otherwise return default. */ if (!is_power_of_2(count)) return SDMA_DESCQ_CNT; if (count < 64 || count > 32768) return SDMA_DESCQ_CNT; return count; } /** * sdma_engine_get_vl() - return vl for a given sdma engine * @sde: sdma engine * * This function returns the vl mapped to a given engine, or an error if * the mapping can't be found. The mapping fields are protected by RCU. */ int sdma_engine_get_vl(struct sdma_engine *sde) { struct hfi1_devdata *dd = sde->dd; struct sdma_vl_map *m; u8 vl; if (sde->this_idx >= TXE_NUM_SDMA_ENGINES) return -EINVAL; rcu_read_lock(); m = rcu_dereference(dd->sdma_map); if (unlikely(!m)) { rcu_read_unlock(); return -EINVAL; } vl = m->engine_to_vl[sde->this_idx]; rcu_read_unlock(); return vl; } /** * sdma_select_engine_vl() - select sdma engine * @dd: devdata * @selector: a spreading factor * @vl: this vl * * * This function returns an engine based on the selector and a vl. The * mapping fields are protected by RCU. */ struct sdma_engine *sdma_select_engine_vl( struct hfi1_devdata *dd, u32 selector, u8 vl) { struct sdma_vl_map *m; struct sdma_map_elem *e; struct sdma_engine *rval; /* NOTE This should only happen if SC->VL changed after the initial * checks on the QP/AH * Default will return engine 0 below */ if (vl >= num_vls) { rval = NULL; goto done; } rcu_read_lock(); m = rcu_dereference(dd->sdma_map); if (unlikely(!m)) { rcu_read_unlock(); return &dd->per_sdma[0]; } e = m->map[vl & m->mask]; rval = e->sde[selector & e->mask]; rcu_read_unlock(); done: rval = !rval ? &dd->per_sdma[0] : rval; trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx); return rval; } /** * sdma_select_engine_sc() - select sdma engine * @dd: devdata * @selector: a spreading factor * @sc5: the 5 bit sc * * * This function returns an engine based on the selector and an sc. */ struct sdma_engine *sdma_select_engine_sc( struct hfi1_devdata *dd, u32 selector, u8 sc5) { u8 vl = sc_to_vlt(dd, sc5); return sdma_select_engine_vl(dd, selector, vl); } struct sdma_rht_map_elem { u32 mask; u8 ctr; struct sdma_engine *sde[]; }; struct sdma_rht_node { unsigned long cpu_id; struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED]; struct rhash_head node; }; #define NR_CPUS_HINT 192 static const struct rhashtable_params sdma_rht_params = { .nelem_hint = NR_CPUS_HINT, .head_offset = offsetof(struct sdma_rht_node, node), .key_offset = offsetof(struct sdma_rht_node, cpu_id), .key_len = sizeof_field(struct sdma_rht_node, cpu_id), .max_size = NR_CPUS, .min_size = 8, .automatic_shrinking = true, }; /* * sdma_select_user_engine() - select sdma engine based on user setup * @dd: devdata * @selector: a spreading factor * @vl: this vl * * This function returns an sdma engine for a user sdma request. * User defined sdma engine affinity setting is honored when applicable, * otherwise system default sdma engine mapping is used. To ensure correct * ordering, the mapping from <selector, vl> to sde must remain unchanged. */ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, u32 selector, u8 vl) { struct sdma_rht_node *rht_node; struct sdma_engine *sde = NULL; unsigned long cpu_id; /* * To ensure that always the same sdma engine(s) will be * selected make sure the process is pinned to this CPU only. 
*/ if (current->nr_cpus_allowed != 1) goto out; rcu_read_lock(); cpu_id = smp_processor_id(); rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id, sdma_rht_params); if (rht_node && rht_node->map[vl]) { struct sdma_rht_map_elem *map = rht_node->map[vl]; sde = map->sde[selector & map->mask]; } rcu_read_unlock(); if (sde) return sde; out: return sdma_select_engine_vl(dd, selector, vl); } static void sdma_populate_sde_map(struct sdma_rht_map_elem *map) { int i; for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++) map->sde[map->ctr + i] = map->sde[i]; } static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map, struct sdma_engine *sde) { unsigned int i, pow; /* only need to check the first ctr entries for a match */ for (i = 0; i < map->ctr; i++) { if (map->sde[i] == sde) { memmove(&map->sde[i], &map->sde[i + 1], (map->ctr - i - 1) * sizeof(map->sde[0])); map->ctr--; pow = roundup_pow_of_two(map->ctr ? : 1); map->mask = pow - 1; sdma_populate_sde_map(map); break; } } } /* * Prevents concurrent reads and writes of the sdma engine cpu_mask */ static DEFINE_MUTEX(process_to_sde_mutex); ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf, size_t count) { struct hfi1_devdata *dd = sde->dd; cpumask_var_t mask, new_mask; unsigned long cpu; int ret, vl, sz; struct sdma_rht_node *rht_node; vl = sdma_engine_get_vl(sde); if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map))) return -EINVAL; ret = zalloc_cpumask_var(&mask, GFP_KERNEL); if (!ret) return -ENOMEM; ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL); if (!ret) { free_cpumask_var(mask); return -ENOMEM; } ret = cpulist_parse(buf, mask); if (ret) goto out_free; if (!cpumask_subset(mask, cpu_online_mask)) { dd_dev_warn(sde->dd, "Invalid CPU mask\n"); ret = -EINVAL; goto out_free; } sz = sizeof(struct sdma_rht_map_elem) + (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *)); mutex_lock(&process_to_sde_mutex); for_each_cpu(cpu, mask) { /* Check if we have this already mapped */ if (cpumask_test_cpu(cpu, &sde->cpu_mask)) { cpumask_set_cpu(cpu, new_mask); continue; } rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu, sdma_rht_params); if (!rht_node) { rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL); if (!rht_node) { ret = -ENOMEM; goto out; } rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); if (!rht_node->map[vl]) { kfree(rht_node); ret = -ENOMEM; goto out; } rht_node->cpu_id = cpu; rht_node->map[vl]->mask = 0; rht_node->map[vl]->ctr = 1; rht_node->map[vl]->sde[0] = sde; ret = rhashtable_insert_fast(dd->sdma_rht, &rht_node->node, sdma_rht_params); if (ret) { kfree(rht_node->map[vl]); kfree(rht_node); dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n", cpu); goto out; } } else { int ctr, pow; /* Add new user mappings */ if (!rht_node->map[vl]) rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); if (!rht_node->map[vl]) { ret = -ENOMEM; goto out; } rht_node->map[vl]->ctr++; ctr = rht_node->map[vl]->ctr; rht_node->map[vl]->sde[ctr - 1] = sde; pow = roundup_pow_of_two(ctr); rht_node->map[vl]->mask = pow - 1; /* Populate the sde map table */ sdma_populate_sde_map(rht_node->map[vl]); } cpumask_set_cpu(cpu, new_mask); } /* Clean up old mappings */ for_each_cpu(cpu, cpu_online_mask) { struct sdma_rht_node *rht_node; /* Don't cleanup sdes that are set in the new mask */ if (cpumask_test_cpu(cpu, mask)) continue; rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu, sdma_rht_params); if (rht_node) { bool empty = true; int i; /* Remove mappings for old sde */ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) if 
(rht_node->map[i]) sdma_cleanup_sde_map(rht_node->map[i], sde); /* Free empty hash table entries */ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) { if (!rht_node->map[i]) continue; if (rht_node->map[i]->ctr) { empty = false; break; } } if (empty) { ret = rhashtable_remove_fast(dd->sdma_rht, &rht_node->node, sdma_rht_params); WARN_ON(ret); for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) kfree(rht_node->map[i]); kfree(rht_node); } } } cpumask_copy(&sde->cpu_mask, new_mask); out: mutex_unlock(&process_to_sde_mutex); out_free: free_cpumask_var(mask); free_cpumask_var(new_mask); return ret ? : strnlen(buf, PAGE_SIZE); } ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf) { mutex_lock(&process_to_sde_mutex); if (cpumask_empty(&sde->cpu_mask)) snprintf(buf, PAGE_SIZE, "%s\n", "empty"); else cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask); mutex_unlock(&process_to_sde_mutex); return strnlen(buf, PAGE_SIZE); } static void sdma_rht_free(void *ptr, void *arg) { struct sdma_rht_node *rht_node = ptr; int i; for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) kfree(rht_node->map[i]); kfree(rht_node); } /** * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings * @s: seq file * @dd: hfi1_devdata * @cpuid: cpu id * * This routine dumps the process to sde mappings per cpu */ void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd, unsigned long cpuid) { struct sdma_rht_node *rht_node; int i, j; rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid, sdma_rht_params); if (!rht_node) return; seq_printf(s, "cpu%3lu: ", cpuid); for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) { if (!rht_node->map[i] || !rht_node->map[i]->ctr) continue; seq_printf(s, " vl%d: [", i); for (j = 0; j < rht_node->map[i]->ctr; j++) { if (!rht_node->map[i]->sde[j]) continue; if (j > 0) seq_puts(s, ","); seq_printf(s, " sdma%2d", rht_node->map[i]->sde[j]->this_idx); } seq_puts(s, " ]"); } seq_puts(s, "\n"); } /* * Free the indicated map struct */ static void sdma_map_free(struct sdma_vl_map *m) { int i; for (i = 0; m && i < m->actual_vls; i++) kfree(m->map[i]); kfree(m); } /* * Handle RCU callback */ static void sdma_map_rcu_callback(struct rcu_head *list) { struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list); sdma_map_free(m); } /** * sdma_map_init - called when # vls change * @dd: hfi1_devdata * @port: port number * @num_vls: number of vls * @vl_engines: per vl engine mapping (optional) * * This routine changes the mapping based on the number of vls. * * vl_engines is used to specify a non-uniform vl/engine loading. NULL * implies auto computing the loading and giving each VLs a uniform * distribution of engines per VL. * * The auto algorithm computes the sde_per_vl and the number of extra * engines. Any extra engines are added from the last VL on down. * * rcu locking is used here to control access to the mapping fields. * * If either the num_vls or num_sdma are non-power of 2, the array sizes * in the struct sdma_vl_map and the struct sdma_map_elem are rounded * up to the next highest power of 2 and the first entry is reused * in a round robin fashion. * * If an error occurs the map change is not done and the mapping is * not changed. 
* */ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines) { int i, j; int extra, sde_per_vl; int engine = 0; u8 lvl_engines[OPA_MAX_VLS]; struct sdma_vl_map *oldmap, *newmap; if (!(dd->flags & HFI1_HAS_SEND_DMA)) return 0; if (!vl_engines) { /* truncate divide */ sde_per_vl = dd->num_sdma / num_vls; /* extras */ extra = dd->num_sdma % num_vls; vl_engines = lvl_engines; /* add extras from last vl down */ for (i = num_vls - 1; i >= 0; i--, extra--) vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0); } /* build new map */ newmap = kzalloc( sizeof(struct sdma_vl_map) + roundup_pow_of_two(num_vls) * sizeof(struct sdma_map_elem *), GFP_KERNEL); if (!newmap) goto bail; newmap->actual_vls = num_vls; newmap->vls = roundup_pow_of_two(num_vls); newmap->mask = (1 << ilog2(newmap->vls)) - 1; /* initialize back-map */ for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++) newmap->engine_to_vl[i] = -1; for (i = 0; i < newmap->vls; i++) { /* save for wrap around */ int first_engine = engine; if (i < newmap->actual_vls) { int sz = roundup_pow_of_two(vl_engines[i]); /* only allocate once */ newmap->map[i] = kzalloc( sizeof(struct sdma_map_elem) + sz * sizeof(struct sdma_engine *), GFP_KERNEL); if (!newmap->map[i]) goto bail; newmap->map[i]->mask = (1 << ilog2(sz)) - 1; /* assign engines */ for (j = 0; j < sz; j++) { newmap->map[i]->sde[j] = &dd->per_sdma[engine]; if (++engine >= first_engine + vl_engines[i]) /* wrap back to first engine */ engine = first_engine; } /* assign back-map */ for (j = 0; j < vl_engines[i]; j++) newmap->engine_to_vl[first_engine + j] = i; } else { /* just re-use entry without allocating */ newmap->map[i] = newmap->map[i % num_vls]; } engine = first_engine + vl_engines[i]; } /* newmap in hand, save old map */ spin_lock_irq(&dd->sde_map_lock); oldmap = rcu_dereference_protected(dd->sdma_map, lockdep_is_held(&dd->sde_map_lock)); /* publish newmap */ rcu_assign_pointer(dd->sdma_map, newmap); spin_unlock_irq(&dd->sde_map_lock); /* success, free any old map after grace period */ if (oldmap) call_rcu(&oldmap->list, sdma_map_rcu_callback); return 0; bail: /* free any partial allocation */ sdma_map_free(newmap); return -ENOMEM; } /** * sdma_clean - Clean up allocated memory * @dd: struct hfi1_devdata * @num_engines: num sdma engines * * This routine can be called regardless of the success of * sdma_init() */ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) { size_t i; struct sdma_engine *sde; if (dd->sdma_pad_dma) { dma_free_coherent(&dd->pcidev->dev, SDMA_PAD, (void *)dd->sdma_pad_dma, dd->sdma_pad_phys); dd->sdma_pad_dma = NULL; dd->sdma_pad_phys = 0; } if (dd->sdma_heads_dma) { dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size, (void *)dd->sdma_heads_dma, dd->sdma_heads_phys); dd->sdma_heads_dma = NULL; dd->sdma_heads_phys = 0; } for (i = 0; dd->per_sdma && i < num_engines; ++i) { sde = &dd->per_sdma[i]; sde->head_dma = NULL; sde->head_phys = 0; if (sde->descq) { dma_free_coherent( &dd->pcidev->dev, sde->descq_cnt * sizeof(u64[2]), sde->descq, sde->descq_phys ); sde->descq = NULL; sde->descq_phys = 0; } kvfree(sde->tx_ring); sde->tx_ring = NULL; } if (rcu_access_pointer(dd->sdma_map)) { spin_lock_irq(&dd->sde_map_lock); sdma_map_free(rcu_access_pointer(dd->sdma_map)); RCU_INIT_POINTER(dd->sdma_map, NULL); spin_unlock_irq(&dd->sde_map_lock); synchronize_rcu(); } kfree(dd->per_sdma); dd->per_sdma = NULL; if (dd->sdma_rht) { rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL); kfree(dd->sdma_rht); dd->sdma_rht = NULL; } } /** * sdma_init() - called 
when device probed * @dd: hfi1_devdata * @port: port number (currently only zero) * * Initializes each sde and its csrs. * Interrupts are not required to be enabled. * * Returns: * 0 - success, -errno on failure */ int sdma_init(struct hfi1_devdata *dd, u8 port) { unsigned this_idx; struct sdma_engine *sde; struct rhashtable *tmp_sdma_rht; u16 descq_cnt; void *curr_head; struct hfi1_pportdata *ppd = dd->pport + port; u32 per_sdma_credits; uint idle_cnt = sdma_idle_cnt; size_t num_engines = chip_sdma_engines(dd); int ret = -ENOMEM; if (!HFI1_CAP_IS_KSET(SDMA)) { HFI1_CAP_CLEAR(SDMA_AHG); return 0; } if (mod_num_sdma && /* can't exceed chip support */ mod_num_sdma <= chip_sdma_engines(dd) && /* count must be >= vls */ mod_num_sdma >= num_vls) num_engines = mod_num_sdma; dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma); dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd)); dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n", chip_sdma_mem_size(dd)); per_sdma_credits = chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE); /* set up freeze waitqueue */ init_waitqueue_head(&dd->sdma_unfreeze_wq); atomic_set(&dd->sdma_unfreeze_count, 0); descq_cnt = sdma_get_descq_cnt(); dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n", num_engines, descq_cnt); /* alloc memory for array of send engines */ dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL, dd->node); if (!dd->per_sdma) return ret; idle_cnt = ns_to_cclock(dd, idle_cnt); if (idle_cnt) dd->default_desc1 = SDMA_DESC1_HEAD_TO_HOST_FLAG; else dd->default_desc1 = SDMA_DESC1_INT_REQ_FLAG; if (!sdma_desct_intr) sdma_desct_intr = SDMA_DESC_INTR; /* Allocate memory for SendDMA descriptor FIFOs */ for (this_idx = 0; this_idx < num_engines; ++this_idx) { sde = &dd->per_sdma[this_idx]; sde->dd = dd; sde->ppd = ppd; sde->this_idx = this_idx; sde->descq_cnt = descq_cnt; sde->desc_avail = sdma_descq_freecnt(sde); sde->sdma_shift = ilog2(descq_cnt); sde->sdma_mask = (1 << sde->sdma_shift) - 1; /* Create a mask specifically for each interrupt source */ sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES + this_idx); sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES + this_idx); sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES + this_idx); /* Create a combined mask to cover all 3 interrupt sources */ sde->imask = sde->int_mask | sde->progress_mask | sde->idle_mask; spin_lock_init(&sde->tail_lock); seqlock_init(&sde->head_lock); spin_lock_init(&sde->senddmactrl_lock); spin_lock_init(&sde->flushlist_lock); seqlock_init(&sde->waitlock); /* insure there is always a zero bit */ sde->ahg_bits = 0xfffffffe00000000ULL; sdma_set_state(sde, sdma_state_s00_hw_down); /* set up reference counting */ kref_init(&sde->state.kref); init_completion(&sde->state.comp); INIT_LIST_HEAD(&sde->flushlist); INIT_LIST_HEAD(&sde->dmawait); sde->tail_csr = get_kctxt_csr_addr(dd, this_idx, SD(TAIL)); tasklet_setup(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task); tasklet_setup(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task); INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait); INIT_WORK(&sde->flush_worker, sdma_field_flush); sde->progress_check_head = 0; timer_setup(&sde->err_progress_check_timer, sdma_err_progress_check, 0); sde->descq = dma_alloc_coherent(&dd->pcidev->dev, descq_cnt * sizeof(u64[2]), &sde->descq_phys, GFP_KERNEL); if (!sde->descq) goto bail; sde->tx_ring = kvzalloc_node(array_size(descq_cnt, sizeof(struct sdma_txreq *)), GFP_KERNEL, dd->node); if (!sde->tx_ring) goto bail; } dd->sdma_heads_size = 
L1_CACHE_BYTES * num_engines; /* Allocate memory for DMA of head registers to memory */ dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev, dd->sdma_heads_size, &dd->sdma_heads_phys, GFP_KERNEL); if (!dd->sdma_heads_dma) { dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); goto bail; } /* Allocate memory for pad */ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD, &dd->sdma_pad_phys, GFP_KERNEL); if (!dd->sdma_pad_dma) { dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); goto bail; } /* assign each engine to different cacheline and init registers */ curr_head = (void *)dd->sdma_heads_dma; for (this_idx = 0; this_idx < num_engines; ++this_idx) { unsigned long phys_offset; sde = &dd->per_sdma[this_idx]; sde->head_dma = curr_head; curr_head += L1_CACHE_BYTES; phys_offset = (unsigned long)sde->head_dma - (unsigned long)dd->sdma_heads_dma; sde->head_phys = dd->sdma_heads_phys + phys_offset; init_sdma_regs(sde, per_sdma_credits, idle_cnt); } dd->flags |= HFI1_HAS_SEND_DMA; dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0; dd->num_sdma = num_engines; ret = sdma_map_init(dd, port, ppd->vls_operational, NULL); if (ret < 0) goto bail; tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL); if (!tmp_sdma_rht) { ret = -ENOMEM; goto bail; } ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params); if (ret < 0) { kfree(tmp_sdma_rht); goto bail; } dd->sdma_rht = tmp_sdma_rht; dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma); return 0; bail: sdma_clean(dd, num_engines); return ret; } /** * sdma_all_running() - called when the link goes up * @dd: hfi1_devdata * * This routine moves all engines to the running state. */ void sdma_all_running(struct hfi1_devdata *dd) { struct sdma_engine *sde; unsigned int i; /* move all engines to running */ for (i = 0; i < dd->num_sdma; ++i) { sde = &dd->per_sdma[i]; sdma_process_event(sde, sdma_event_e30_go_running); } } /** * sdma_all_idle() - called when the link goes down * @dd: hfi1_devdata * * This routine moves all engines to the idle state. */ void sdma_all_idle(struct hfi1_devdata *dd) { struct sdma_engine *sde; unsigned int i; /* idle all engines */ for (i = 0; i < dd->num_sdma; ++i) { sde = &dd->per_sdma[i]; sdma_process_event(sde, sdma_event_e70_go_idle); } } /** * sdma_start() - called to kick off state processing for all engines * @dd: hfi1_devdata * * This routine is for kicking off the state processing for all required * sdma engines. Interrupts need to be working at this point. * */ void sdma_start(struct hfi1_devdata *dd) { unsigned i; struct sdma_engine *sde; /* kick off the engines state processing */ for (i = 0; i < dd->num_sdma; ++i) { sde = &dd->per_sdma[i]; sdma_process_event(sde, sdma_event_e10_go_hw_start); } } /** * sdma_exit() - used when module is removed * @dd: hfi1_devdata */ void sdma_exit(struct hfi1_devdata *dd) { unsigned this_idx; struct sdma_engine *sde; for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma; ++this_idx) { sde = &dd->per_sdma[this_idx]; if (!list_empty(&sde->dmawait)) dd_dev_err(dd, "sde %u: dmawait list not empty!\n", sde->this_idx); sdma_process_event(sde, sdma_event_e00_go_hw_down); del_timer_sync(&sde->err_progress_check_timer); /* * This waits for the state machine to exit so it is not * necessary to kill the sdma_sw_clean_up_task to make sure * it is not running. 
*/ sdma_finalput(&sde->state); } } /* * unmap the indicated descriptor */ static inline void sdma_unmap_desc( struct hfi1_devdata *dd, struct sdma_desc *descp) { switch (sdma_mapping_type(descp)) { case SDMA_MAP_SINGLE: dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp), sdma_mapping_len(descp), DMA_TO_DEVICE); break; case SDMA_MAP_PAGE: dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp), sdma_mapping_len(descp), DMA_TO_DEVICE); break; } if (descp->pinning_ctx && descp->ctx_put) descp->ctx_put(descp->pinning_ctx); descp->pinning_ctx = NULL; } /* * return the mode as indicated by the first * descriptor in the tx. */ static inline u8 ahg_mode(struct sdma_txreq *tx) { return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK) >> SDMA_DESC1_HEADER_MODE_SHIFT; } /** * __sdma_txclean() - clean tx of mappings, descp *kmalloc's * @dd: hfi1_devdata for unmapping * @tx: tx request to clean * * This is used in the progress routine to clean the tx or * by the ULP to toss an in-process tx build. * * The code can be called multiple times without issue. * */ void __sdma_txclean( struct hfi1_devdata *dd, struct sdma_txreq *tx) { u16 i; if (tx->num_desc) { u8 skip = 0, mode = ahg_mode(tx); /* unmap first */ sdma_unmap_desc(dd, &tx->descp[0]); /* determine number of AHG descriptors to skip */ if (mode > SDMA_AHG_APPLY_UPDATE1) skip = mode >> 1; for (i = 1 + skip; i < tx->num_desc; i++) sdma_unmap_desc(dd, &tx->descp[i]); tx->num_desc = 0; } kfree(tx->coalesce_buf); tx->coalesce_buf = NULL; /* kmalloc'ed descp */ if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) { tx->desc_limit = ARRAY_SIZE(tx->descs); kfree(tx->descp); } } static inline u16 sdma_gethead(struct sdma_engine *sde) { struct hfi1_devdata *dd = sde->dd; int use_dmahead; u16 hwhead; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif retry: use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) && (dd->flags & HFI1_HAS_SDMA_TIMEOUT); hwhead = use_dmahead ? (u16)le64_to_cpu(*sde->head_dma) : (u16)read_sde_csr(sde, SD(HEAD)); if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) { u16 cnt; u16 swtail; u16 swhead; int sane; swhead = sde->descq_head & sde->sdma_mask; /* this code is really bad for cache line trading */ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; cnt = sde->descq_cnt; if (swhead < swtail) /* not wrapped */ sane = (hwhead >= swhead) & (hwhead <= swtail); else if (swhead > swtail) /* wrapped around */ sane = ((hwhead >= swhead) && (hwhead < cnt)) || (hwhead <= swtail); else /* empty */ sane = (hwhead == swhead); if (unlikely(!sane)) { dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%u swhd=%u swtl=%u cnt=%u\n", sde->this_idx, use_dmahead ? "dma" : "kreg", hwhead, swhead, swtail, cnt); if (use_dmahead) { /* try one more time, using csr */ use_dmahead = 0; goto retry; } /* proceed as if no progress */ hwhead = swhead; } } return hwhead; } /* * This is called when there are send DMA descriptors that might be * available. * * This is called with head_lock held. 
 */
static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
{
	struct iowait *wait, *nw, *twait;
	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
	uint i, n = 0, seq, tidx = 0;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "avail: %u\n", avail);
#endif

	do {
		seq = read_seqbegin(&sde->waitlock);
		if (!list_empty(&sde->dmawait)) {
			/* at least one item */
			write_seqlock(&sde->waitlock);
			/* Harvest waiters wanting DMA descriptors */
			list_for_each_entry_safe(wait, nw, &sde->dmawait, list) {
				u32 num_desc;

				if (!wait->wakeup)
					continue;
				if (n == ARRAY_SIZE(waits))
					break;
				iowait_init_priority(wait);
				num_desc = iowait_get_all_desc(wait);
				if (num_desc > avail)
					break;
				avail -= num_desc;
				/* Find the top-priority wait member */
				if (n) {
					twait = waits[tidx];
					tidx = iowait_priority_update_top(wait, twait,
									  n, tidx);
				}
				list_del_init(&wait->list);
				waits[n++] = wait;
			}
			write_sequnlock(&sde->waitlock);
			break;
		}
	} while (read_seqretry(&sde->waitlock, seq));

	/* Schedule the top-priority entry first */
	if (n)
		waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);

	for (i = 0; i < n; i++)
		if (i != tidx)
			waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}

/* head_lock must be held */
static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
	struct sdma_txreq *txp = NULL;
	int progress = 0;
	u16 hwhead, swhead;
	int idle_check_done = 0;

	hwhead = sdma_gethead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

retry:
	txp = get_txhead(sde);
	swhead = sde->descq_head & sde->sdma_mask;
	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
	while (swhead != hwhead) {
		/* advance head, wrap if needed */
		swhead = ++sde->descq_head & sde->sdma_mask;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == swhead) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
		progress++;
	}

	/*
	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory. The head
	 * value read might not be fully up to date. If there are pending
	 * descriptors and the SDMA idle interrupt fired then read from the
	 * CSR SDMA head instead to get the latest value from the hardware.
	 * The hardware SDMA head should be read at most once in this invocation
	 * of sdma_make_progress(..) which is ensured by idle_check_done flag
	 */
	if ((status & sde->idle_mask) && !idle_check_done) {
		u16 swtail;

		swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
		if (swtail != hwhead) {
			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
			idle_check_done = 1;
			goto retry;
		}
	}

	sde->last_status = status;
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}

/*
 * sdma_engine_interrupt() - interrupt handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 *
 * Status is a mask of the 3 possible interrupts for this engine.  It will
 * contain bits _only_ for this SDMA engine.  It will contain at least one
 * bit, it may contain more.
*/ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status) { trace_hfi1_sdma_engine_interrupt(sde, status); write_seqlock(&sde->head_lock); sdma_set_desc_cnt(sde, sdma_desct_intr); if (status & sde->idle_mask) sde->idle_int_cnt++; else if (status & sde->progress_mask) sde->progress_int_cnt++; else if (status & sde->int_mask) sde->sdma_int_cnt++; sdma_make_progress(sde, status); write_sequnlock(&sde->head_lock); } /** * sdma_engine_error() - error handler for engine * @sde: sdma engine * @status: sdma interrupt reason */ void sdma_engine_error(struct sdma_engine *sde, u64 status) { unsigned long flags; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n", sde->this_idx, (unsigned long long)status, sdma_state_names[sde->state.current_state]); #endif spin_lock_irqsave(&sde->tail_lock, flags); write_seqlock(&sde->head_lock); if (status & ALL_SDMA_ENG_HALT_ERRS) __sdma_process_event(sde, sdma_event_e60_hw_halted); if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) { dd_dev_err(sde->dd, "SDMA (%u) engine error: 0x%llx state %s\n", sde->this_idx, (unsigned long long)status, sdma_state_names[sde->state.current_state]); dump_sdma_state(sde); } write_sequnlock(&sde->head_lock); spin_unlock_irqrestore(&sde->tail_lock, flags); } static void sdma_sendctrl(struct sdma_engine *sde, unsigned op) { u64 set_senddmactrl = 0; u64 clr_senddmactrl = 0; unsigned long flags; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n", sde->this_idx, (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0, (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0, (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0, (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0); #endif if (op & SDMA_SENDCTRL_OP_ENABLE) set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); else clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); if (op & SDMA_SENDCTRL_OP_INTENABLE) set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); else clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); if (op & SDMA_SENDCTRL_OP_HALT) set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); else clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); spin_lock_irqsave(&sde->senddmactrl_lock, flags); sde->p_senddmactrl |= set_senddmactrl; sde->p_senddmactrl &= ~clr_senddmactrl; if (op & SDMA_SENDCTRL_OP_CLEANUP) write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl | SD(CTRL_SDMA_CLEANUP_SMASK)); else write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl); spin_unlock_irqrestore(&sde->senddmactrl_lock, flags); #ifdef CONFIG_SDMA_VERBOSITY sdma_dumpstate(sde); #endif } static void sdma_setlengen(struct sdma_engine *sde) { #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif /* * Set SendDmaLenGen and clear-then-set the MSB of the generation * count to enable generation checking and load the internal * generation counter. */ write_sde_csr(sde, SD(LEN_GEN), (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)); write_sde_csr(sde, SD(LEN_GEN), ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))); } static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail) { /* Commit writes to memory and advance the tail on the chip */ smp_wmb(); /* see get_txhead() */ writeq(tail, sde->tail_csr); } /* * This is called when changing to state s10_hw_start_up_halt_wait as * a result of send buffer errors or send DMA descriptor errors. 
*/ static void sdma_hw_start_up(struct sdma_engine *sde) { u64 reg; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif sdma_setlengen(sde); sdma_update_tail(sde, 0); /* Set SendDmaTail */ *sde->head_dma = 0; reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) << SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT); write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); } /* * set_sdma_integrity * * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'. */ static void set_sdma_integrity(struct sdma_engine *sde) { struct hfi1_devdata *dd = sde->dd; write_sde_csr(sde, SD(CHECK_ENABLE), hfi1_pkt_base_sdma_integrity(dd)); } static void init_sdma_regs( struct sdma_engine *sde, u32 credits, uint idle_cnt) { u8 opval, opmask; #ifdef CONFIG_SDMA_VERBOSITY struct hfi1_devdata *dd = sde->dd; dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys); sdma_setlengen(sde); sdma_update_tail(sde, 0); /* Set SendDmaTail */ write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt); write_sde_csr(sde, SD(DESC_CNT), 0); write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys); write_sde_csr(sde, SD(MEMORY), ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) | ((u64)(credits * sde->this_idx) << SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT))); write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull); set_sdma_integrity(sde); opmask = OPCODE_CHECK_MASK_DISABLED; opval = OPCODE_CHECK_VAL_DISABLED; write_sde_csr(sde, SD(CHECK_OPCODE), (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) | (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT)); } #ifdef CONFIG_SDMA_VERBOSITY #define sdma_dumpstate_helper0(reg) do { \ csr = read_csr(sde->dd, reg); \ dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \ } while (0) #define sdma_dumpstate_helper(reg) do { \ csr = read_sde_csr(sde, reg); \ dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \ #reg, sde->this_idx, csr); \ } while (0) #define sdma_dumpstate_helper2(reg) do { \ csr = read_csr(sde->dd, reg + (8 * i)); \ dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \ #reg, i, csr); \ } while (0) void sdma_dumpstate(struct sdma_engine *sde) { u64 csr; unsigned i; sdma_dumpstate_helper(SD(CTRL)); sdma_dumpstate_helper(SD(STATUS)); sdma_dumpstate_helper0(SD(ERR_STATUS)); sdma_dumpstate_helper0(SD(ERR_MASK)); sdma_dumpstate_helper(SD(ENG_ERR_STATUS)); sdma_dumpstate_helper(SD(ENG_ERR_MASK)); for (i = 0; i < CCE_NUM_INT_CSRS; ++i) { sdma_dumpstate_helper2(CCE_INT_STATUS); sdma_dumpstate_helper2(CCE_INT_MASK); sdma_dumpstate_helper2(CCE_INT_BLOCKED); } sdma_dumpstate_helper(SD(TAIL)); sdma_dumpstate_helper(SD(HEAD)); sdma_dumpstate_helper(SD(PRIORITY_THLD)); sdma_dumpstate_helper(SD(IDLE_CNT)); sdma_dumpstate_helper(SD(RELOAD_CNT)); sdma_dumpstate_helper(SD(DESC_CNT)); sdma_dumpstate_helper(SD(DESC_FETCHED_CNT)); sdma_dumpstate_helper(SD(MEMORY)); sdma_dumpstate_helper0(SD(ENGINES)); sdma_dumpstate_helper0(SD(MEM_SIZE)); /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */ sdma_dumpstate_helper(SD(BASE_ADDR)); sdma_dumpstate_helper(SD(LEN_GEN)); sdma_dumpstate_helper(SD(HEAD_ADDR)); sdma_dumpstate_helper(SD(CHECK_ENABLE)); sdma_dumpstate_helper(SD(CHECK_VL)); sdma_dumpstate_helper(SD(CHECK_JOB_KEY)); sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY)); sdma_dumpstate_helper(SD(CHECK_SLID)); sdma_dumpstate_helper(SD(CHECK_OPCODE)); } #endif static void dump_sdma_state(struct sdma_engine *sde) { struct hw_sdma_desc *descqp; u64 
desc[2]; u64 addr; u8 gen; u16 len; u16 head, tail, cnt; head = sde->descq_head & sde->sdma_mask; tail = sde->descq_tail & sde->sdma_mask; cnt = sdma_descq_freecnt(sde); dd_dev_err(sde->dd, "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n", sde->this_idx, head, tail, cnt, !list_empty(&sde->flushlist)); /* print info for each entry in the descriptor queue */ while (head != tail) { char flags[6] = { 'x', 'x', 'x', 'x', 0 }; descqp = &sde->descq[head]; desc[0] = le64_to_cpu(descqp->qw[0]); desc[1] = le64_to_cpu(descqp->qw[1]); flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-'; flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-'; addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) & SDMA_DESC0_PHY_ADDR_MASK; gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) & SDMA_DESC1_GENERATION_MASK; len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) & SDMA_DESC0_BYTE_COUNT_MASK; dd_dev_err(sde->dd, "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", head, flags, addr, gen, len); dd_dev_err(sde->dd, "\tdesc0:0x%016llx desc1 0x%016llx\n", desc[0], desc[1]); if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) dd_dev_err(sde->dd, "\taidx: %u amode: %u alen: %u\n", (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) >> SDMA_DESC1_HEADER_INDEX_SHIFT), (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) >> SDMA_DESC1_HEADER_MODE_SHIFT), (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK) >> SDMA_DESC1_HEADER_DWS_SHIFT)); head++; head &= sde->sdma_mask; } } #define SDE_FMT \ "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n" /** * sdma_seqfile_dump_sde() - debugfs dump of sde * @s: seq file * @sde: send dma engine to dump * * This routine dumps the sde to the indicated seq file. */ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) { u16 head, tail; struct hw_sdma_desc *descqp; u64 desc[2]; u64 addr; u8 gen; u16 len; head = sde->descq_head & sde->sdma_mask; tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; seq_printf(s, SDE_FMT, sde->this_idx, sde->cpu, sdma_state_name(sde->state.current_state), (unsigned long long)read_sde_csr(sde, SD(CTRL)), (unsigned long long)read_sde_csr(sde, SD(STATUS)), (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)), (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail, (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, (unsigned long long)le64_to_cpu(*sde->head_dma), (unsigned long long)read_sde_csr(sde, SD(MEMORY)), (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), (unsigned long long)sde->last_status, (unsigned long long)sde->ahg_bits, sde->tx_tail, sde->tx_head, sde->descq_tail, sde->descq_head, !list_empty(&sde->flushlist), sde->descq_full_count, (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); /* print info for each entry in the descriptor queue */ while (head != tail) { char flags[6] = { 'x', 'x', 'x', 'x', 0 }; descqp = &sde->descq[head]; desc[0] = le64_to_cpu(descqp->qw[0]); desc[1] = le64_to_cpu(descqp->qw[1]); flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-'; flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 
'L' : '-'; addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) & SDMA_DESC0_PHY_ADDR_MASK; gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) & SDMA_DESC1_GENERATION_MASK; len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) & SDMA_DESC0_BYTE_COUNT_MASK; seq_printf(s, "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", head, flags, addr, gen, len); if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) >> SDMA_DESC1_HEADER_INDEX_SHIFT), (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) >> SDMA_DESC1_HEADER_MODE_SHIFT)); head = (head + 1) & sde->sdma_mask; } } /* * add the generation number into * the qw1 and return */ static inline u64 add_gen(struct sdma_engine *sde, u64 qw1) { u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3; qw1 &= ~SDMA_DESC1_GENERATION_SMASK; qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK) << SDMA_DESC1_GENERATION_SHIFT; return qw1; } /* * This routine submits the indicated tx * * Space has already been guaranteed and * tail side of ring is locked. * * The hardware tail update is done * in the caller and that is facilitated * by returning the new tail. * * There is special case logic for ahg * to not add the generation number for * up to 2 descriptors that follow the * first descriptor. * */ static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx) { int i; u16 tail; struct sdma_desc *descp = tx->descp; u8 skip = 0, mode = ahg_mode(tx); tail = sde->descq_tail & sde->sdma_mask; sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1])); trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1], tail, &sde->descq[tail]); tail = ++sde->descq_tail & sde->sdma_mask; descp++; if (mode > SDMA_AHG_APPLY_UPDATE1) skip = mode >> 1; for (i = 1; i < tx->num_desc; i++, descp++) { u64 qw1; sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); if (skip) { /* edits don't have generation */ qw1 = descp->qw[1]; skip--; } else { /* replace generation with real one for non-edits */ qw1 = add_gen(sde, descp->qw[1]); } sde->descq[tail].qw[1] = cpu_to_le64(qw1); trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1, tail, &sde->descq[tail]); tail = ++sde->descq_tail & sde->sdma_mask; } tx->next_descq_idx = tail; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER tx->sn = sde->tail_sn++; trace_hfi1_sdma_in_sn(sde, tx->sn); WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]); #endif sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx; sde->desc_avail -= tx->num_desc; return tail; } /* * Check for progress */ static int sdma_check_progress( struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *tx, bool pkts_sent) { int ret; sde->desc_avail = sdma_descq_freecnt(sde); if (tx->num_desc <= sde->desc_avail) return -EAGAIN; /* pulse the head_lock */ if (wait && iowait_ioww_to_iow(wait)->sleep) { unsigned seq; seq = raw_seqcount_begin( (const seqcount_t *)&sde->head_lock.seqcount); ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent); if (ret == -EAGAIN) sde->desc_avail = sdma_descq_freecnt(sde); } else { ret = -EBUSY; } return ret; } /** * sdma_send_txreq() - submit a tx req to ring * @sde: sdma engine to use * @wait: SE wait structure to use when full (may be NULL) * @tx: sdma_txreq to submit * @pkts_sent: has any packet been sent yet? * * The call submits the tx into the ring. If a iowait structure is non-NULL * the packet will be queued to the list in wait. 
* * Return: * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in * ring (wait == NULL) * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state */ int sdma_send_txreq(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *tx, bool pkts_sent) { int ret = 0; u16 tail; unsigned long flags; /* user should have supplied entire packet */ if (unlikely(tx->tlen)) return -EINVAL; tx->wait = iowait_ioww_to_iow(wait); spin_lock_irqsave(&sde->tail_lock, flags); retry: if (unlikely(!__sdma_running(sde))) goto unlock_noconn; if (unlikely(tx->num_desc > sde->desc_avail)) goto nodesc; tail = submit_tx(sde, tx); if (wait) iowait_sdma_inc(iowait_ioww_to_iow(wait)); sdma_update_tail(sde, tail); unlock: spin_unlock_irqrestore(&sde->tail_lock, flags); return ret; unlock_noconn: if (wait) iowait_sdma_inc(iowait_ioww_to_iow(wait)); tx->next_descq_idx = 0; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER tx->sn = sde->tail_sn++; trace_hfi1_sdma_in_sn(sde, tx->sn); #endif spin_lock(&sde->flushlist_lock); list_add_tail(&tx->list, &sde->flushlist); spin_unlock(&sde->flushlist_lock); iowait_inc_wait_count(wait, tx->num_desc); queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); ret = -ECOMM; goto unlock; nodesc: ret = sdma_check_progress(sde, wait, tx, pkts_sent); if (ret == -EAGAIN) { ret = 0; goto retry; } sde->descq_full_count++; goto unlock; } /** * sdma_send_txlist() - submit a list of tx req to ring * @sde: sdma engine to use * @wait: SE wait structure to use when full (may be NULL) * @tx_list: list of sdma_txreqs to submit * @count_out: pointer to a u16 which, after return will contain the total number of * sdma_txreqs removed from the tx_list. This will include sdma_txreqs * whose SDMA descriptors are submitted to the ring and the sdma_txreqs * which are added to SDMA engine flush list if the SDMA engine state is * not running. * * The call submits the list into the ring. * * If the iowait structure is non-NULL and not equal to the iowait list * the unprocessed part of the list will be appended to the list in wait. * * In all cases, the tx_list will be updated so the head of the tx_list is * the list of descriptors that have yet to be transmitted. * * The intent of this call is to provide a more efficient * way of submitting multiple packets to SDMA while holding the tail * side locking. 
* * Return: * 0 - Success, * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state */ int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait, struct list_head *tx_list, u16 *count_out) { struct sdma_txreq *tx, *tx_next; int ret = 0; unsigned long flags; u16 tail = INVALID_TAIL; u32 submit_count = 0, flush_count = 0, total_count; spin_lock_irqsave(&sde->tail_lock, flags); retry: list_for_each_entry_safe(tx, tx_next, tx_list, list) { tx->wait = iowait_ioww_to_iow(wait); if (unlikely(!__sdma_running(sde))) goto unlock_noconn; if (unlikely(tx->num_desc > sde->desc_avail)) goto nodesc; if (unlikely(tx->tlen)) { ret = -EINVAL; goto update_tail; } list_del_init(&tx->list); tail = submit_tx(sde, tx); submit_count++; if (tail != INVALID_TAIL && (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) { sdma_update_tail(sde, tail); tail = INVALID_TAIL; } } update_tail: total_count = submit_count + flush_count; if (wait) { iowait_sdma_add(iowait_ioww_to_iow(wait), total_count); iowait_starve_clear(submit_count > 0, iowait_ioww_to_iow(wait)); } if (tail != INVALID_TAIL) sdma_update_tail(sde, tail); spin_unlock_irqrestore(&sde->tail_lock, flags); *count_out = total_count; return ret; unlock_noconn: spin_lock(&sde->flushlist_lock); list_for_each_entry_safe(tx, tx_next, tx_list, list) { tx->wait = iowait_ioww_to_iow(wait); list_del_init(&tx->list); tx->next_descq_idx = 0; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER tx->sn = sde->tail_sn++; trace_hfi1_sdma_in_sn(sde, tx->sn); #endif list_add_tail(&tx->list, &sde->flushlist); flush_count++; iowait_inc_wait_count(wait, tx->num_desc); } spin_unlock(&sde->flushlist_lock); queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); ret = -ECOMM; goto update_tail; nodesc: ret = sdma_check_progress(sde, wait, tx, submit_count > 0); if (ret == -EAGAIN) { ret = 0; goto retry; } sde->descq_full_count++; goto update_tail; } static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) { unsigned long flags; spin_lock_irqsave(&sde->tail_lock, flags); write_seqlock(&sde->head_lock); __sdma_process_event(sde, event); if (sde->state.current_state == sdma_state_s99_running) sdma_desc_avail(sde, sdma_descq_freecnt(sde)); write_sequnlock(&sde->head_lock); spin_unlock_irqrestore(&sde->tail_lock, flags); } static void __sdma_process_event(struct sdma_engine *sde, enum sdma_events event) { struct sdma_state *ss = &sde->state; int need_progress = 0; /* CONFIG SDMA temporary */ #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx, sdma_state_names[ss->current_state], sdma_event_names[event]); #endif switch (ss->current_state) { case sdma_state_s00_hw_down: switch (event) { case sdma_event_e00_go_hw_down: break; case sdma_event_e30_go_running: /* * If down, but running requested (usually result * of link up, then we need to start up. * This can happen when hw down is requested while * bringing the link up with traffic active on * 7220, e.g. 
*/ ss->go_s99_running = 1; fallthrough; /* and start dma engine */ case sdma_event_e10_go_hw_start: /* This reference means the state machine is started */ sdma_get(&sde->state); sdma_set_state(sde, sdma_state_s10_hw_start_up_halt_wait); break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e40_sw_cleaned: sdma_sw_tear_down(sde); break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s10_hw_start_up_halt_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); sdma_sw_tear_down(sde); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: sdma_set_state(sde, sdma_state_s15_hw_start_up_clean_wait); sdma_start_hw_clean_up(sde); break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s15_hw_start_up_clean_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); sdma_sw_tear_down(sde); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: sdma_hw_start_up(sde); sdma_set_state(sde, ss->go_s99_running ? 
sdma_state_s99_running : sdma_state_s20_idle); break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s20_idle: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); sdma_sw_tear_down(sde); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: sdma_set_state(sde, sdma_state_s99_running); ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: sdma_set_state(sde, sdma_state_s50_hw_halt_wait); schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: break; case sdma_event_e85_link_down: case sdma_event_e80_hw_freeze: sdma_set_state(sde, sdma_state_s80_hw_freeze); atomic_dec(&sde->dd->sdma_unfreeze_count); wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s30_sw_clean_up_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait); sdma_start_hw_clean_up(sde); break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: ss->go_s99_running = 0; break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s40_hw_clean_up_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: sdma_hw_start_up(sde); sdma_set_state(sde, ss->go_s99_running ? 
sdma_state_s99_running : sdma_state_s20_idle); break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: ss->go_s99_running = 0; break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s50_hw_halt_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: ss->go_s99_running = 0; break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s60_idle_halt_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s80_hw_freeze: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: sdma_set_state(sde, sdma_state_s82_freeze_sw_clean); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s82_freeze_sw_clean: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; 
break; case sdma_event_e40_sw_cleaned: /* notify caller this engine is done cleaning */ atomic_dec(&sde->dd->sdma_unfreeze_count); wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: sdma_hw_start_up(sde); sdma_set_state(sde, ss->go_s99_running ? sdma_state_s99_running : sdma_state_s20_idle); break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s99_running: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: need_progress = 1; sdma_err_progress_check_schedule(sde); fallthrough; case sdma_event_e90_sw_halted: /* * SW initiated halt does not perform engines * progress check */ sdma_set_state(sde, sdma_state_s50_hw_halt_wait); schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: sdma_set_state(sde, sdma_state_s60_idle_halt_wait); break; case sdma_event_e85_link_down: ss->go_s99_running = 0; fallthrough; case sdma_event_e80_hw_freeze: sdma_set_state(sde, sdma_state_s80_hw_freeze); atomic_dec(&sde->dd->sdma_unfreeze_count); wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; } break; } ss->last_event = event; if (need_progress) sdma_make_progress(sde, 0); } /* * _extend_sdma_tx_descs() - helper to extend txreq * * This is called once the initial nominal allocation * of descriptors in the sdma_txreq is exhausted. * * The code will bump the allocation up to the max * of MAX_DESC (64) descriptors. There doesn't seem * much point in an interim step. The last descriptor * is reserved for coalesce buffer in order to support * cases where input packet has >MAX_DESC iovecs. * */ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int i; struct sdma_desc *descp; /* Handle last descriptor */ if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { /* if tlen is 0, it is for padding, release last descriptor */ if (!tx->tlen) { tx->desc_limit = MAX_DESC; } else if (!tx->coalesce_buf) { /* allocate coalesce buffer with space for padding */ tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32), GFP_ATOMIC); if (!tx->coalesce_buf) goto enomem; tx->coalesce_idx = 0; } return 0; } if (unlikely(tx->num_desc == MAX_DESC)) goto enomem; descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC); if (!descp) goto enomem; tx->descp = descp; /* reserve last descriptor for coalescing */ tx->desc_limit = MAX_DESC - 1; /* copy ones already built */ for (i = 0; i < tx->num_desc; i++) tx->descp[i] = tx->descs[i]; return 0; enomem: __sdma_txclean(dd, tx); return -ENOMEM; } /* * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors * * This is called once the initial nominal allocation of descriptors * in the sdma_txreq is exhausted. * * This function calls _extend_sdma_tx_descs to extend or allocate * coalesce buffer. 
If there is a allocated coalesce buffer, it will * copy the input packet data into the coalesce buffer. It also adds * coalesce buffer descriptor once when whole packet is received. * * Return: * <0 - error * 0 - coalescing, don't populate descriptor * 1 - continue with populating descriptor */ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, int type, void *kvaddr, struct page *page, unsigned long offset, u16 len) { int pad_len, rval; dma_addr_t addr; rval = _extend_sdma_tx_descs(dd, tx); if (rval) { __sdma_txclean(dd, tx); return rval; } /* If coalesce buffer is allocated, copy data into it */ if (tx->coalesce_buf) { if (type == SDMA_MAP_NONE) { __sdma_txclean(dd, tx); return -EINVAL; } if (type == SDMA_MAP_PAGE) { kvaddr = kmap_local_page(page); kvaddr += offset; } else if (WARN_ON(!kvaddr)) { __sdma_txclean(dd, tx); return -EINVAL; } memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len); tx->coalesce_idx += len; if (type == SDMA_MAP_PAGE) kunmap_local(kvaddr); /* If there is more data, return */ if (tx->tlen - tx->coalesce_idx) return 0; /* Whole packet is received; add any padding */ pad_len = tx->packet_len & (sizeof(u32) - 1); if (pad_len) { pad_len = sizeof(u32) - pad_len; memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len); /* padding is taken care of for coalescing case */ tx->packet_len += pad_len; tx->tlen += pad_len; } /* dma map the coalesce buffer */ addr = dma_map_single(&dd->pcidev->dev, tx->coalesce_buf, tx->tlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { __sdma_txclean(dd, tx); return -ENOSPC; } /* Add descriptor for coalesce buffer */ tx->desc_limit = MAX_DESC; return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, tx->tlen, NULL, NULL, NULL); } return 1; } /* Update sdes when the lmc changes */ void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid) { struct sdma_engine *sde; int i; u64 sreg; sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) << SD(CHECK_SLID_MASK_SHIFT)) | (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) << SD(CHECK_SLID_VALUE_SHIFT)); for (i = 0; i < dd->num_sdma; i++) { hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x", i, (u32)sreg); sde = &dd->per_sdma[i]; write_sde_csr(sde, SD(CHECK_SLID), sreg); } } /* tx not dword sized - pad */ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int rval = 0; if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) { rval = _extend_sdma_tx_descs(dd, tx); if (rval) { __sdma_txclean(dd, tx); return rval; } } /* finish the one just added */ make_tx_sdma_desc( tx, SDMA_MAP_NONE, dd->sdma_pad_phys, sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)), NULL, NULL, NULL); tx->num_desc++; _sdma_close_tx(dd, tx); return rval; } /* * Add ahg to the sdma_txreq * * The logic will consume up to 3 * descriptors at the beginning of * sdma_txreq. 
*/ void _sdma_txreq_ahgadd( struct sdma_txreq *tx, u8 num_ahg, u8 ahg_entry, u32 *ahg, u8 ahg_hlen) { u32 i, shift = 0, desc = 0; u8 mode; WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4); /* compute mode */ if (num_ahg == 1) mode = SDMA_AHG_APPLY_UPDATE1; else if (num_ahg <= 5) mode = SDMA_AHG_APPLY_UPDATE2; else mode = SDMA_AHG_APPLY_UPDATE3; tx->num_desc++; /* initialize to consumed descriptors to zero */ switch (mode) { case SDMA_AHG_APPLY_UPDATE3: tx->num_desc++; tx->descs[2].qw[0] = 0; tx->descs[2].qw[1] = 0; fallthrough; case SDMA_AHG_APPLY_UPDATE2: tx->num_desc++; tx->descs[1].qw[0] = 0; tx->descs[1].qw[1] = 0; break; } ahg_hlen >>= 2; tx->descs[0].qw[1] |= (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK) << SDMA_DESC1_HEADER_INDEX_SHIFT) | (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK) << SDMA_DESC1_HEADER_DWS_SHIFT) | (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK) << SDMA_DESC1_HEADER_MODE_SHIFT) | (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK) << SDMA_DESC1_HEADER_UPDATE1_SHIFT); for (i = 0; i < (num_ahg - 1); i++) { if (!shift && !(i & 2)) desc++; tx->descs[desc].qw[!!(i & 2)] |= (((u64)ahg[i + 1]) << shift); shift = (shift + 32) & 63; } } /** * sdma_ahg_alloc - allocate an AHG entry * @sde: engine to allocate from * * Return: * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled, * -ENOSPC if an entry is not available */ int sdma_ahg_alloc(struct sdma_engine *sde) { int nr; int oldbit; if (!sde) { trace_hfi1_ahg_allocate(sde, -EINVAL); return -EINVAL; } while (1) { nr = ffz(READ_ONCE(sde->ahg_bits)); if (nr > 31) { trace_hfi1_ahg_allocate(sde, -ENOSPC); return -ENOSPC; } oldbit = test_and_set_bit(nr, &sde->ahg_bits); if (!oldbit) break; cpu_relax(); } trace_hfi1_ahg_allocate(sde, nr); return nr; } /** * sdma_ahg_free - free an AHG entry * @sde: engine to return AHG entry * @ahg_index: index to free * * This routine frees the indicate AHG entry. */ void sdma_ahg_free(struct sdma_engine *sde, int ahg_index) { if (!sde) return; trace_hfi1_ahg_deallocate(sde, ahg_index); if (ahg_index < 0 || ahg_index > 31) return; clear_bit(ahg_index, &sde->ahg_bits); } /* * SPC freeze handling for SDMA engines. Called when the driver knows * the SPC is going into a freeze but before the freeze is fully * settled. Generally an error interrupt. * * This event will pull the engine out of running so no more entries can be * added to the engine's queue. */ void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down) { int i; enum sdma_events event = link_down ? sdma_event_e85_link_down : sdma_event_e80_hw_freeze; /* set up the wait but do not wait here */ atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); /* tell all engines to stop running and wait */ for (i = 0; i < dd->num_sdma; i++) sdma_process_event(&dd->per_sdma[i], event); /* sdma_freeze() will wait for all engines to have stopped */ } /* * SPC freeze handling for SDMA engines. Called when the driver knows * the SPC is fully frozen. */ void sdma_freeze(struct hfi1_devdata *dd) { int i; int ret; /* * Make sure all engines have moved out of the running state before * continuing. 
*/ ret = wait_event_interruptible(dd->sdma_unfreeze_wq, atomic_read(&dd->sdma_unfreeze_count) <= 0); /* interrupted or count is negative, then unloading - just exit */ if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0) return; /* set up the count for the next wait */ atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); /* tell all engines that the SPC is frozen, they can start cleaning */ for (i = 0; i < dd->num_sdma; i++) sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen); /* * Wait for everyone to finish software clean before exiting. The * software clean will read engine CSRs, so must be completed before * the next step, which will clear the engine CSRs. */ (void)wait_event_interruptible(dd->sdma_unfreeze_wq, atomic_read(&dd->sdma_unfreeze_count) <= 0); /* no need to check results - done no matter what */ } /* * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen. * * The SPC freeze acts like a SDMA halt and a hardware clean combined. All * that is left is a software clean. We could do it after the SPC is fully * frozen, but then we'd have to add another state to wait for the unfreeze. * Instead, just defer the software clean until the unfreeze step. */ void sdma_unfreeze(struct hfi1_devdata *dd) { int i; /* tell all engines start freeze clean up */ for (i = 0; i < dd->num_sdma; i++) sdma_process_event(&dd->per_sdma[i], sdma_event_e82_hw_unfreeze); } /** * _sdma_engine_progress_schedule() - schedule progress on engine * @sde: sdma_engine to schedule progress * */ void _sdma_engine_progress_schedule( struct sdma_engine *sde) { trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); /* assume we have selected a good cpu */ write_csr(sde->dd, CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), sde->progress_mask); }
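/*
 * Illustrative sketch only (not part of the driver above): the power-of-two
 * fill-and-mask pattern that sdma_populate_sde_map() and sdma_map_init()
 * rely on, shown with plain ints standing in for struct sdma_engine
 * pointers.  All names here (toy_map, toy_fill, toy_select) are hypothetical.
 * Compile as a standalone program.
 */
#include <stdio.h>

#define TOY_MAX 8				/* enough slots for the demo */

struct toy_map {
	unsigned int ctr;			/* real entries */
	unsigned int mask;			/* roundup_pow_of_two(ctr) - 1 */
	int sde[TOY_MAX];			/* engine ids in place of sde pointers */
};

static unsigned int toy_roundup_pow_of_two(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Repeat the first entries so every (selector & mask) index is populated. */
static void toy_fill(struct toy_map *m)
{
	unsigned int pow = toy_roundup_pow_of_two(m->ctr ? m->ctr : 1);
	unsigned int i;

	m->mask = pow - 1;
	for (i = 0; i < pow - m->ctr; i++)
		m->sde[m->ctr + i] = m->sde[i];
}

/* Selection is then a single mask, no modulo on the fast path. */
static int toy_select(const struct toy_map *m, unsigned int selector)
{
	return m->sde[selector & m->mask];
}

int main(void)
{
	struct toy_map m = { .ctr = 3, .sde = { 10, 11, 12 } };
	unsigned int s;

	toy_fill(&m);			/* entries become 10 11 12 10, mask = 3 */
	for (s = 0; s < 8; s++)
		printf("selector %u -> engine %d\n", s, toy_select(&m, s));
	return 0;
}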
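/*
 * Illustrative sketch only (not part of the driver above): the 2-bit
 * generation counter that add_gen() folds into descriptor qw1.  The ring
 * index is the low ilog2(descq_cnt) bits of a free-running tail; the next
 * two bits change on every ring wrap.  The shift/mask values below are
 * arbitrary stand-ins, not the hardware SDMA_DESC1_* encodings.  Compile as
 * a standalone program.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_DESCQ_CNT	8u			/* ring entries (power of two) */
#define TOY_SHIFT	3u			/* ilog2(TOY_DESCQ_CNT) */
#define TOY_GEN_SHIFT	60			/* hypothetical qw1 bit position */
#define TOY_GEN_SMASK	(3ULL << TOY_GEN_SHIFT)

static uint64_t toy_add_gen(uint64_t qw1, unsigned int descq_tail)
{
	uint64_t gen = (descq_tail >> TOY_SHIFT) & 3;

	qw1 &= ~TOY_GEN_SMASK;
	return qw1 | (gen << TOY_GEN_SHIFT);
}

int main(void)
{
	unsigned int tail;

	/* Generation stays constant within a pass and bumps on each wrap. */
	for (tail = 0; tail < 4 * TOY_DESCQ_CNT; tail += TOY_DESCQ_CNT)
		printf("tail %2u -> index %u gen %llu\n",
		       tail, tail & (TOY_DESCQ_CNT - 1),
		       (unsigned long long)((toy_add_gen(0, tail) >> TOY_GEN_SHIFT) & 3));
	return 0;
}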
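/*
 * Illustrative sketch only (not part of the driver above): the dword padding
 * arithmetic used when a packet is not a multiple of 4 bytes, as in
 * ext_coal_sdma_tx_descs() and _pad_sdma_tx_descs().  Compile as a
 * standalone program.
 */
#include <stdio.h>
#include <stdint.h>

/* Bytes of zero padding needed to round packet_len up to a 4-byte boundary. */
static unsigned int toy_dword_pad(unsigned int packet_len)
{
	unsigned int pad = packet_len & (sizeof(uint32_t) - 1);

	return pad ? (unsigned int)(sizeof(uint32_t) - pad) : 0;
}

int main(void)
{
	unsigned int len;

	for (len = 60; len <= 64; len++)
		printf("packet_len %u -> pad %u -> padded %u\n",
		       len, toy_dword_pad(len), len + toy_dword_pad(len));
	return 0;
}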
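/*
 * Illustrative sketch only (not part of the driver above): how
 * _sdma_txreq_ahgadd() packs the extra 32-bit AHG update words two per
 * 64-bit qword.  Per the code above, ahg[0] rides in descriptor 0, the mode
 * is UPDATE1 for one word, UPDATE2 for up to five and UPDATE3 otherwise, and
 * the remaining words fill descs[1].qw[0..1] and then descs[2].qw[0..1].
 * Names here are hypothetical and the real descriptor masks are omitted.
 * Compile as a standalone program.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_MAX_UPDATES 8		/* ahg[1]..ahg[8] */

/* Pack n 32-bit words, low half first, into four 64-bit qwords. */
static void toy_pack_ahg(const uint32_t *updates, unsigned int n, uint64_t qw[4])
{
	unsigned int i;

	for (i = 0; i < n && i < TOY_MAX_UPDATES; i++)
		qw[i / 2] |= (uint64_t)updates[i] << ((i & 1) * 32);
}

int main(void)
{
	const uint32_t updates[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
	uint64_t qw[4] = { 0 };
	unsigned int i;

	toy_pack_ahg(updates, 4, qw);
	for (i = 0; i < 4; i++)
		printf("qw[%u] = 0x%016llx\n", i, (unsigned long long)qw[i]);
	return 0;
}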
linux-master
drivers/infiniband/hw/hfi1/sdma.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015-2017 Intel Corporation. */ #include <linux/ctype.h> #include <rdma/ib_sysfs.h> #include "hfi.h" #include "mad.h" #include "trace.h" static struct hfi1_pportdata *hfi1_get_pportdata_kobj(struct kobject *kobj) { u32 port_num; struct ib_device *ibdev = ib_port_sysfs_get_ibdev_kobj(kobj, &port_num); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); return &dd->pport[port_num - 1]; } /* * Start of per-port congestion control structures and support code */ /* * Congestion control table size followed by table entries */ static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { int ret; struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj); struct cc_state *cc_state; ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow) + sizeof(__be16); if (pos > ret) return -EINVAL; if (count > ret - pos) count = ret - pos; if (!count) return count; rcu_read_lock(); cc_state = get_cc_state(ppd); if (!cc_state) { rcu_read_unlock(); return -EINVAL; } memcpy(buf, (void *)&cc_state->cct + pos, count); rcu_read_unlock(); return count; } static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE); /* * Congestion settings: port control, control map and an array of 16 * entries for the congestion entries - increase, timer, event log * trigger threshold and the minimum injection rate delay. */ static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj); int ret; struct cc_state *cc_state; ret = sizeof(struct opa_congestion_setting_attr_shadow); if (pos > ret) return -EINVAL; if (count > ret - pos) count = ret - pos; if (!count) return count; rcu_read_lock(); cc_state = get_cc_state(ppd); if (!cc_state) { rcu_read_unlock(); return -EINVAL; } memcpy(buf, (void *)&cc_state->cong_setting + pos, count); rcu_read_unlock(); return count; } static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE); static struct bin_attribute *port_cc_bin_attributes[] = { &bin_attr_cc_setting_bin, &bin_attr_cc_table_bin, NULL }; static ssize_t cc_prescan_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; return sysfs_emit(buf, "%s\n", ppd->cc_prescan ? 
"on" : "off"); } static ssize_t cc_prescan_store(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, const char *buf, size_t count) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; if (!memcmp(buf, "on", 2)) ppd->cc_prescan = true; else if (!memcmp(buf, "off", 3)) ppd->cc_prescan = false; return count; } static IB_PORT_ATTR_ADMIN_RW(cc_prescan); static struct attribute *port_cc_attributes[] = { &ib_port_attr_cc_prescan.attr, NULL }; static const struct attribute_group port_cc_group = { .name = "CCMgtA", .attrs = port_cc_attributes, .bin_attrs = port_cc_bin_attributes, }; /* Start sc2vl */ struct hfi1_sc2vl_attr { struct ib_port_attribute attr; int sc; }; static ssize_t sc2vl_attr_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct hfi1_sc2vl_attr *sattr = container_of(attr, struct hfi1_sc2vl_attr, attr); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); return sysfs_emit(buf, "%u\n", *((u8 *)dd->sc2vl + sattr->sc)); } #define HFI1_SC2VL_ATTR(N) \ static struct hfi1_sc2vl_attr hfi1_sc2vl_attr_##N = { \ .attr = __ATTR(N, 0444, sc2vl_attr_show, NULL), \ .sc = N, \ } HFI1_SC2VL_ATTR(0); HFI1_SC2VL_ATTR(1); HFI1_SC2VL_ATTR(2); HFI1_SC2VL_ATTR(3); HFI1_SC2VL_ATTR(4); HFI1_SC2VL_ATTR(5); HFI1_SC2VL_ATTR(6); HFI1_SC2VL_ATTR(7); HFI1_SC2VL_ATTR(8); HFI1_SC2VL_ATTR(9); HFI1_SC2VL_ATTR(10); HFI1_SC2VL_ATTR(11); HFI1_SC2VL_ATTR(12); HFI1_SC2VL_ATTR(13); HFI1_SC2VL_ATTR(14); HFI1_SC2VL_ATTR(15); HFI1_SC2VL_ATTR(16); HFI1_SC2VL_ATTR(17); HFI1_SC2VL_ATTR(18); HFI1_SC2VL_ATTR(19); HFI1_SC2VL_ATTR(20); HFI1_SC2VL_ATTR(21); HFI1_SC2VL_ATTR(22); HFI1_SC2VL_ATTR(23); HFI1_SC2VL_ATTR(24); HFI1_SC2VL_ATTR(25); HFI1_SC2VL_ATTR(26); HFI1_SC2VL_ATTR(27); HFI1_SC2VL_ATTR(28); HFI1_SC2VL_ATTR(29); HFI1_SC2VL_ATTR(30); HFI1_SC2VL_ATTR(31); static struct attribute *port_sc2vl_attributes[] = { &hfi1_sc2vl_attr_0.attr.attr, &hfi1_sc2vl_attr_1.attr.attr, &hfi1_sc2vl_attr_2.attr.attr, &hfi1_sc2vl_attr_3.attr.attr, &hfi1_sc2vl_attr_4.attr.attr, &hfi1_sc2vl_attr_5.attr.attr, &hfi1_sc2vl_attr_6.attr.attr, &hfi1_sc2vl_attr_7.attr.attr, &hfi1_sc2vl_attr_8.attr.attr, &hfi1_sc2vl_attr_9.attr.attr, &hfi1_sc2vl_attr_10.attr.attr, &hfi1_sc2vl_attr_11.attr.attr, &hfi1_sc2vl_attr_12.attr.attr, &hfi1_sc2vl_attr_13.attr.attr, &hfi1_sc2vl_attr_14.attr.attr, &hfi1_sc2vl_attr_15.attr.attr, &hfi1_sc2vl_attr_16.attr.attr, &hfi1_sc2vl_attr_17.attr.attr, &hfi1_sc2vl_attr_18.attr.attr, &hfi1_sc2vl_attr_19.attr.attr, &hfi1_sc2vl_attr_20.attr.attr, &hfi1_sc2vl_attr_21.attr.attr, &hfi1_sc2vl_attr_22.attr.attr, &hfi1_sc2vl_attr_23.attr.attr, &hfi1_sc2vl_attr_24.attr.attr, &hfi1_sc2vl_attr_25.attr.attr, &hfi1_sc2vl_attr_26.attr.attr, &hfi1_sc2vl_attr_27.attr.attr, &hfi1_sc2vl_attr_28.attr.attr, &hfi1_sc2vl_attr_29.attr.attr, &hfi1_sc2vl_attr_30.attr.attr, &hfi1_sc2vl_attr_31.attr.attr, NULL }; static const struct attribute_group port_sc2vl_group = { .name = "sc2vl", .attrs = port_sc2vl_attributes, }; /* End sc2vl */ /* Start sl2sc */ struct hfi1_sl2sc_attr { struct ib_port_attribute attr; int sl; }; static ssize_t sl2sc_attr_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct hfi1_sl2sc_attr *sattr = container_of(attr, struct hfi1_sl2sc_attr, attr); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data; return sysfs_emit(buf, "%u\n", ibp->sl_to_sc[sattr->sl]); } #define HFI1_SL2SC_ATTR(N) \ static struct hfi1_sl2sc_attr 
hfi1_sl2sc_attr_##N = { \ .attr = __ATTR(N, 0444, sl2sc_attr_show, NULL), .sl = N \ } HFI1_SL2SC_ATTR(0); HFI1_SL2SC_ATTR(1); HFI1_SL2SC_ATTR(2); HFI1_SL2SC_ATTR(3); HFI1_SL2SC_ATTR(4); HFI1_SL2SC_ATTR(5); HFI1_SL2SC_ATTR(6); HFI1_SL2SC_ATTR(7); HFI1_SL2SC_ATTR(8); HFI1_SL2SC_ATTR(9); HFI1_SL2SC_ATTR(10); HFI1_SL2SC_ATTR(11); HFI1_SL2SC_ATTR(12); HFI1_SL2SC_ATTR(13); HFI1_SL2SC_ATTR(14); HFI1_SL2SC_ATTR(15); HFI1_SL2SC_ATTR(16); HFI1_SL2SC_ATTR(17); HFI1_SL2SC_ATTR(18); HFI1_SL2SC_ATTR(19); HFI1_SL2SC_ATTR(20); HFI1_SL2SC_ATTR(21); HFI1_SL2SC_ATTR(22); HFI1_SL2SC_ATTR(23); HFI1_SL2SC_ATTR(24); HFI1_SL2SC_ATTR(25); HFI1_SL2SC_ATTR(26); HFI1_SL2SC_ATTR(27); HFI1_SL2SC_ATTR(28); HFI1_SL2SC_ATTR(29); HFI1_SL2SC_ATTR(30); HFI1_SL2SC_ATTR(31); static struct attribute *port_sl2sc_attributes[] = { &hfi1_sl2sc_attr_0.attr.attr, &hfi1_sl2sc_attr_1.attr.attr, &hfi1_sl2sc_attr_2.attr.attr, &hfi1_sl2sc_attr_3.attr.attr, &hfi1_sl2sc_attr_4.attr.attr, &hfi1_sl2sc_attr_5.attr.attr, &hfi1_sl2sc_attr_6.attr.attr, &hfi1_sl2sc_attr_7.attr.attr, &hfi1_sl2sc_attr_8.attr.attr, &hfi1_sl2sc_attr_9.attr.attr, &hfi1_sl2sc_attr_10.attr.attr, &hfi1_sl2sc_attr_11.attr.attr, &hfi1_sl2sc_attr_12.attr.attr, &hfi1_sl2sc_attr_13.attr.attr, &hfi1_sl2sc_attr_14.attr.attr, &hfi1_sl2sc_attr_15.attr.attr, &hfi1_sl2sc_attr_16.attr.attr, &hfi1_sl2sc_attr_17.attr.attr, &hfi1_sl2sc_attr_18.attr.attr, &hfi1_sl2sc_attr_19.attr.attr, &hfi1_sl2sc_attr_20.attr.attr, &hfi1_sl2sc_attr_21.attr.attr, &hfi1_sl2sc_attr_22.attr.attr, &hfi1_sl2sc_attr_23.attr.attr, &hfi1_sl2sc_attr_24.attr.attr, &hfi1_sl2sc_attr_25.attr.attr, &hfi1_sl2sc_attr_26.attr.attr, &hfi1_sl2sc_attr_27.attr.attr, &hfi1_sl2sc_attr_28.attr.attr, &hfi1_sl2sc_attr_29.attr.attr, &hfi1_sl2sc_attr_30.attr.attr, &hfi1_sl2sc_attr_31.attr.attr, NULL }; static const struct attribute_group port_sl2sc_group = { .name = "sl2sc", .attrs = port_sl2sc_attributes, }; /* End sl2sc */ /* Start vl2mtu */ struct hfi1_vl2mtu_attr { struct ib_port_attribute attr; int vl; }; static ssize_t vl2mtu_attr_show(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct hfi1_vl2mtu_attr *vlattr = container_of(attr, struct hfi1_vl2mtu_attr, attr); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); return sysfs_emit(buf, "%u\n", dd->vld[vlattr->vl].mtu); } #define HFI1_VL2MTU_ATTR(N) \ static struct hfi1_vl2mtu_attr hfi1_vl2mtu_attr_##N = { \ .attr = __ATTR(N, 0444, vl2mtu_attr_show, NULL), \ .vl = N, \ } HFI1_VL2MTU_ATTR(0); HFI1_VL2MTU_ATTR(1); HFI1_VL2MTU_ATTR(2); HFI1_VL2MTU_ATTR(3); HFI1_VL2MTU_ATTR(4); HFI1_VL2MTU_ATTR(5); HFI1_VL2MTU_ATTR(6); HFI1_VL2MTU_ATTR(7); HFI1_VL2MTU_ATTR(8); HFI1_VL2MTU_ATTR(9); HFI1_VL2MTU_ATTR(10); HFI1_VL2MTU_ATTR(11); HFI1_VL2MTU_ATTR(12); HFI1_VL2MTU_ATTR(13); HFI1_VL2MTU_ATTR(14); HFI1_VL2MTU_ATTR(15); static struct attribute *port_vl2mtu_attributes[] = { &hfi1_vl2mtu_attr_0.attr.attr, &hfi1_vl2mtu_attr_1.attr.attr, &hfi1_vl2mtu_attr_2.attr.attr, &hfi1_vl2mtu_attr_3.attr.attr, &hfi1_vl2mtu_attr_4.attr.attr, &hfi1_vl2mtu_attr_5.attr.attr, &hfi1_vl2mtu_attr_6.attr.attr, &hfi1_vl2mtu_attr_7.attr.attr, &hfi1_vl2mtu_attr_8.attr.attr, &hfi1_vl2mtu_attr_9.attr.attr, &hfi1_vl2mtu_attr_10.attr.attr, &hfi1_vl2mtu_attr_11.attr.attr, &hfi1_vl2mtu_attr_12.attr.attr, &hfi1_vl2mtu_attr_13.attr.attr, &hfi1_vl2mtu_attr_14.attr.attr, &hfi1_vl2mtu_attr_15.attr.attr, NULL }; static const struct attribute_group port_vl2mtu_group = { .name = "vl2mtu", .attrs = port_vl2mtu_attributes, }; /* end of per-port file structures and support code */ /* * Start of 
per-unit (or driver, in some cases, but replicated * per unit) functions (these get a device *) */ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev); return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev); } static DEVICE_ATTR_RO(hw_rev); static ssize_t board_id_show(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev); struct hfi1_devdata *dd = dd_from_dev(dev); if (!dd->boardname) return -EINVAL; return sysfs_emit(buf, "%s\n", dd->boardname); } static DEVICE_ATTR_RO(board_id); static ssize_t boardversion_show(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev); struct hfi1_devdata *dd = dd_from_dev(dev); /* The string printed here is already newline-terminated. */ return sysfs_emit(buf, "%s", dd->boardversion); } static DEVICE_ATTR_RO(boardversion); static ssize_t nctxts_show(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev); struct hfi1_devdata *dd = dd_from_dev(dev); /* * Return the smaller of send and receive contexts. * Normally, user level applications would require both a send * and a receive context, so returning the smaller of the two counts * give a more accurate picture of total contexts available. */ return sysfs_emit(buf, "%u\n", min(dd->num_user_contexts, (u32)dd->sc_sizes[SC_USER].count)); } static DEVICE_ATTR_RO(nctxts); static ssize_t nfreectxts_show(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev); struct hfi1_devdata *dd = dd_from_dev(dev); /* Return the number of free user ports (contexts) available. */ return sysfs_emit(buf, "%u\n", dd->freectxts); } static DEVICE_ATTR_RO(nfreectxts); static ssize_t serial_show(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev); struct hfi1_devdata *dd = dd_from_dev(dev); /* dd->serial is already newline terminated in chip.c */ return sysfs_emit(buf, "%s", dd->serial); } static DEVICE_ATTR_RO(serial); static ssize_t chip_reset_store(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { struct hfi1_ibdev *dev = rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev); struct hfi1_devdata *dd = dd_from_dev(dev); int ret; if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) { ret = -EINVAL; goto bail; } ret = hfi1_reset_device(dd->unit); bail: return ret < 0 ? ret : count; } static DEVICE_ATTR_WO(chip_reset); /* * Convert the reported temperature from an integer (reported in * units of 0.25C) to a floating point number. */ #define temp_d(t) ((t) >> 2) #define temp_f(t) (((t)&0x3) * 25u) /* * Dump tempsense values, in decimal, to ease shell-scripts. 
*/ static ssize_t tempsense_show(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev); struct hfi1_devdata *dd = dd_from_dev(dev); struct hfi1_temp temp; int ret; ret = hfi1_tempsense_rd(dd, &temp); if (ret) return ret; return sysfs_emit(buf, "%u.%02u %u.%02u %u.%02u %u.%02u %u %u %u\n", temp_d(temp.curr), temp_f(temp.curr), temp_d(temp.lo_lim), temp_f(temp.lo_lim), temp_d(temp.hi_lim), temp_f(temp.hi_lim), temp_d(temp.crit_lim), temp_f(temp.crit_lim), temp.triggers & 0x1, temp.triggers & 0x2, temp.triggers & 0x4); } static DEVICE_ATTR_RO(tempsense); /* * end of per-unit (or driver, in some cases, but replicated * per unit) functions */ /* start of per-unit file structures and support code */ static struct attribute *hfi1_attributes[] = { &dev_attr_hw_rev.attr, &dev_attr_board_id.attr, &dev_attr_nctxts.attr, &dev_attr_nfreectxts.attr, &dev_attr_serial.attr, &dev_attr_boardversion.attr, &dev_attr_tempsense.attr, &dev_attr_chip_reset.attr, NULL, }; const struct attribute_group ib_hfi1_attr_group = { .attrs = hfi1_attributes, }; const struct attribute_group *hfi1_attr_port_groups[] = { &port_cc_group, &port_sc2vl_group, &port_sl2sc_group, &port_vl2mtu_group, NULL, }; struct sde_attribute { struct attribute attr; ssize_t (*show)(struct sdma_engine *sde, char *buf); ssize_t (*store)(struct sdma_engine *sde, const char *buf, size_t cnt); }; static ssize_t sde_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct sde_attribute *sde_attr = container_of(attr, struct sde_attribute, attr); struct sdma_engine *sde = container_of(kobj, struct sdma_engine, kobj); if (!sde_attr->show) return -EINVAL; return sde_attr->show(sde, buf); } static ssize_t sde_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct sde_attribute *sde_attr = container_of(attr, struct sde_attribute, attr); struct sdma_engine *sde = container_of(kobj, struct sdma_engine, kobj); if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!sde_attr->store) return -EINVAL; return sde_attr->store(sde, buf, count); } static const struct sysfs_ops sde_sysfs_ops = { .show = sde_show, .store = sde_store, }; static struct kobj_type sde_ktype = { .sysfs_ops = &sde_sysfs_ops, }; #define SDE_ATTR(_name, _mode, _show, _store) \ struct sde_attribute sde_attr_##_name = \ __ATTR(_name, _mode, _show, _store) static ssize_t sde_show_cpu_to_sde_map(struct sdma_engine *sde, char *buf) { return sdma_get_cpu_to_sde_map(sde, buf); } static ssize_t sde_store_cpu_to_sde_map(struct sdma_engine *sde, const char *buf, size_t count) { return sdma_set_cpu_to_sde_map(sde, buf, count); } static ssize_t sde_show_vl(struct sdma_engine *sde, char *buf) { int vl; vl = sdma_engine_get_vl(sde); if (vl < 0) return vl; return sysfs_emit(buf, "%d\n", vl); } static SDE_ATTR(cpu_list, S_IWUSR | S_IRUGO, sde_show_cpu_to_sde_map, sde_store_cpu_to_sde_map); static SDE_ATTR(vl, S_IRUGO, sde_show_vl, NULL); static struct sde_attribute *sde_attribs[] = { &sde_attr_cpu_list, &sde_attr_vl }; /* * Register and create our files in /sys/class/infiniband. 
*/ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd) { struct ib_device *dev = &dd->verbs_dev.rdi.ibdev; struct device *class_dev = &dev->dev; int i, j, ret; for (i = 0; i < dd->num_sdma; i++) { ret = kobject_init_and_add(&dd->per_sdma[i].kobj, &sde_ktype, &class_dev->kobj, "sdma%d", i); if (ret) goto bail; for (j = 0; j < ARRAY_SIZE(sde_attribs); j++) { ret = sysfs_create_file(&dd->per_sdma[i].kobj, &sde_attribs[j]->attr); if (ret) goto bail; } } return 0; bail: /* * The function kobject_put() will call kobject_del() if the kobject * has been added successfully. The sysfs files created under the * kobject directory will also be removed during the process. */ for (; i >= 0; i--) kobject_put(&dd->per_sdma[i].kobj); return ret; } /* * Unregister and remove our files in /sys/class/infiniband. */ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd) { int i; /* Unwind operations in hfi1_verbs_register_sysfs() */ for (i = 0; i < dd->num_sdma; i++) kobject_put(&dd->per_sdma[i].kobj); }
linux-master
drivers/infiniband/hw/hfi1/sysfs.c
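The cc_table_bin_read() and cc_setting_bin_read() handlers above follow the usual sysfs binary-attribute contract: compute the object size, reject an offset beyond it, clamp the requested count to the bytes that remain, and return 0 when nothing is left. Here is a small standalone sketch of just that offset/length clamping in ordinary userspace C; bin_read_window() and its arguments are invented for illustration and are not part of the driver or of sysfs.

/*
 * Sketch of the offset/length clamping done by the binary sysfs read
 * handlers above.  The "object" is just an in-memory string.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* copy up to @count bytes of @src (@src_len bytes long) starting at @pos */
static ssize_t bin_read_window(char *dst, const char *src, size_t src_len,
			       long pos, size_t count)
{
	if (pos < 0 || (size_t)pos > src_len)
		return -1;			/* -EINVAL in the driver */
	if (count > src_len - (size_t)pos)
		count = src_len - (size_t)pos;	/* clamp to remaining bytes */
	if (!count)
		return 0;			/* nothing left to read */
	memcpy(dst, src + pos, count);
	return (ssize_t)count;
}

int main(void)
{
	const char table[] = "congestion-control-table";
	char buf[64];
	ssize_t n;

	/* a read that starts inside the object is clamped to its end */
	n = bin_read_window(buf, table, sizeof(table) - 1, 11, sizeof(buf));
	printf("read %zd bytes: %.*s\n", n, (int)n, buf);

	/* a read past the end is rejected, as the handlers do */
	n = bin_read_window(buf, table, sizeof(table) - 1, 100, 4);
	printf("out-of-range read -> %zd\n", n);
	return 0;
}

Returning the possibly shortened byte count lets the caller issue successive reads until the whole object has been transferred, which is how sysfs drives partial reads of these attributes.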
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ #include <linux/delay.h> #include "hfi.h" #include "common.h" #include "eprom.h" /* * The EPROM is logically divided into three partitions: * partition 0: the first 128K, visible from PCI ROM BAR * partition 1: 4K config file (sector size) * partition 2: the rest */ #define P0_SIZE (128 * 1024) #define P1_SIZE (4 * 1024) #define P1_START P0_SIZE #define P2_START (P0_SIZE + P1_SIZE) /* controller page size, in bytes */ #define EP_PAGE_SIZE 256 #define EP_PAGE_MASK (EP_PAGE_SIZE - 1) #define EP_PAGE_DWORDS (EP_PAGE_SIZE / sizeof(u32)) /* controller commands */ #define CMD_SHIFT 24 #define CMD_NOP (0) #define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr) #define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT)) /* controller interface speeds */ #define EP_SPEED_FULL 0x2 /* full speed */ /* * How long to wait for the EPROM to become available, in ms. * The spec 32 Mb EPROM takes around 40s to erase then write. * Double it for safety. */ #define EPROM_TIMEOUT 80000 /* ms */ /* * Read a 256 byte (64 dword) EPROM page. * All callers have verified the offset is at a page boundary. */ static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result) { int i; write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset)); for (i = 0; i < EP_PAGE_DWORDS; i++) result[i] = (u32)read_csr(dd, ASIC_EEP_DATA); write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */ } /* * Read length bytes starting at offset from the start of the EPROM. */ static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, void *dest) { u32 buffer[EP_PAGE_DWORDS]; u32 end; u32 start_offset; u32 read_start; u32 bytes; if (len == 0) return 0; end = start + len; /* * Make sure the read range is not outside of the controller read * command address range. Note that '>' is correct below - the end * of the range is OK if it stops at the limit, but no higher. */ if (end > (1 << CMD_SHIFT)) return -EINVAL; /* read the first partial page */ start_offset = start & EP_PAGE_MASK; if (start_offset) { /* partial starting page */ /* align and read the page that contains the start */ read_start = start & ~EP_PAGE_MASK; read_page(dd, read_start, buffer); /* the rest of the page is available data */ bytes = EP_PAGE_SIZE - start_offset; if (len <= bytes) { /* end is within this page */ memcpy(dest, (u8 *)buffer + start_offset, len); return 0; } memcpy(dest, (u8 *)buffer + start_offset, bytes); start += bytes; len -= bytes; dest += bytes; } /* start is now page aligned */ /* read whole pages */ while (len >= EP_PAGE_SIZE) { read_page(dd, start, buffer); memcpy(dest, buffer, EP_PAGE_SIZE); start += EP_PAGE_SIZE; len -= EP_PAGE_SIZE; dest += EP_PAGE_SIZE; } /* read the last partial page */ if (len) { read_page(dd, start, buffer); memcpy(dest, buffer, len); } return 0; } /* * Initialize the EPROM handler. */ int eprom_init(struct hfi1_devdata *dd) { int ret = 0; /* only the discrete chip has an EPROM */ if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0) return 0; /* * It is OK if both HFIs reset the EPROM as long as they don't * do it at the same time. 
*/ ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); if (ret) { dd_dev_err(dd, "%s: unable to acquire EPROM resource, no EPROM support\n", __func__); goto done_asic; } /* reset EPROM to be sure it is in a good state */ /* set reset */ write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK); /* clear reset, set speed */ write_csr(dd, ASIC_EEP_CTL_STAT, EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT); /* wake the device with command "release powerdown NoID" */ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID); dd->eprom_available = true; release_chip_resource(dd, CR_EPROM); done_asic: return ret; } /* magic character sequence that begins an image */ #define IMAGE_START_MAGIC "APO=" /* magic character sequence that might trail an image */ #define IMAGE_TRAIL_MAGIC "egamiAPO" /* EPROM file types */ #define HFI1_EFT_PLATFORM_CONFIG 2 /* segment size - 128 KiB */ #define SEG_SIZE (128 * 1024) struct hfi1_eprom_footer { u32 oprom_size; /* size of the oprom, in bytes */ u16 num_table_entries; u16 version; /* version of this footer */ u32 magic; /* must be last */ }; struct hfi1_eprom_table_entry { u32 type; /* file type */ u32 offset; /* file offset from start of EPROM */ u32 size; /* file size, in bytes */ }; /* * Calculate the max number of table entries that will fit within a directory * buffer of size 'dir_size'. */ #define MAX_TABLE_ENTRIES(dir_size) \ (((dir_size) - sizeof(struct hfi1_eprom_footer)) / \ sizeof(struct hfi1_eprom_table_entry)) #define DIRECTORY_SIZE(n) (sizeof(struct hfi1_eprom_footer) + \ (sizeof(struct hfi1_eprom_table_entry) * (n))) #define MAGIC4(a, b, c, d) ((d) << 24 | (c) << 16 | (b) << 8 | (a)) #define FOOTER_MAGIC MAGIC4('e', 'p', 'r', 'm') #define FOOTER_VERSION 1 /* * Read all of partition 1. The actual file is at the front. Adjust * the returned size if a trailing image magic is found. */ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data, u32 *size) { void *buffer; void *p; u32 length; int ret; buffer = kmalloc(P1_SIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; ret = read_length(dd, P1_START, P1_SIZE, buffer); if (ret) { kfree(buffer); return ret; } /* config partition is valid only if it starts with IMAGE_START_MAGIC */ if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) { kfree(buffer); return -ENOENT; } /* scan for image magic that may trail the actual data */ p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); if (p) length = p - buffer; else length = P1_SIZE; *data = buffer; *size = length; return 0; } /* * The segment magic has been checked. There is a footer and table of * contents present. * * directory is a u32 aligned buffer of size EP_PAGE_SIZE. 
*/ static int read_segment_platform_config(struct hfi1_devdata *dd, void *directory, void **data, u32 *size) { struct hfi1_eprom_footer *footer; struct hfi1_eprom_table_entry *table; struct hfi1_eprom_table_entry *entry; void *buffer = NULL; void *table_buffer = NULL; int ret, i; u32 directory_size; u32 seg_base, seg_offset; u32 bytes_available, ncopied, to_copy; /* the footer is at the end of the directory */ footer = (struct hfi1_eprom_footer *) (directory + EP_PAGE_SIZE - sizeof(*footer)); /* make sure the structure version is supported */ if (footer->version != FOOTER_VERSION) return -EINVAL; /* oprom size cannot be larger than a segment */ if (footer->oprom_size >= SEG_SIZE) return -EINVAL; /* the file table must fit in a segment with the oprom */ if (footer->num_table_entries > MAX_TABLE_ENTRIES(SEG_SIZE - footer->oprom_size)) return -EINVAL; /* find the file table start, which precedes the footer */ directory_size = DIRECTORY_SIZE(footer->num_table_entries); if (directory_size <= EP_PAGE_SIZE) { /* the file table fits into the directory buffer handed in */ table = (struct hfi1_eprom_table_entry *) (directory + EP_PAGE_SIZE - directory_size); } else { /* need to allocate and read more */ table_buffer = kmalloc(directory_size, GFP_KERNEL); if (!table_buffer) return -ENOMEM; ret = read_length(dd, SEG_SIZE - directory_size, directory_size, table_buffer); if (ret) goto done; table = table_buffer; } /* look for the platform configuration file in the table */ for (entry = NULL, i = 0; i < footer->num_table_entries; i++) { if (table[i].type == HFI1_EFT_PLATFORM_CONFIG) { entry = &table[i]; break; } } if (!entry) { ret = -ENOENT; goto done; } /* * Sanity check on the configuration file size - it should never * be larger than 4 KiB. */ if (entry->size > (4 * 1024)) { dd_dev_err(dd, "Bad configuration file size 0x%x\n", entry->size); ret = -EINVAL; goto done; } /* check for bogus offset and size that wrap when added together */ if (entry->offset + entry->size < entry->offset) { dd_dev_err(dd, "Bad configuration file start + size 0x%x+0x%x\n", entry->offset, entry->size); ret = -EINVAL; goto done; } /* allocate the buffer to return */ buffer = kmalloc(entry->size, GFP_KERNEL); if (!buffer) { ret = -ENOMEM; goto done; } /* * Extract the file by looping over segments until it is fully read. */ seg_offset = entry->offset % SEG_SIZE; seg_base = entry->offset - seg_offset; ncopied = 0; while (ncopied < entry->size) { /* calculate data bytes available in this segment */ /* start with the bytes from the current offset to the end */ bytes_available = SEG_SIZE - seg_offset; /* subtract off footer and table from segment 0 */ if (seg_base == 0) { /* * Sanity check: should not have a starting point * at or within the directory. */ if (bytes_available <= directory_size) { dd_dev_err(dd, "Bad configuration file - offset 0x%x within footer+table\n", entry->offset); ret = -EINVAL; goto done; } bytes_available -= directory_size; } /* calculate bytes wanted */ to_copy = entry->size - ncopied; /* max out at the available bytes in this segment */ if (to_copy > bytes_available) to_copy = bytes_available; /* * Read from the EPROM. * * The sanity check for entry->offset is done in read_length(). * The EPROM offset is validated against what the hardware * addressing supports. In addition, if the offset is larger * than the actual EPROM, it silently wraps. It will work * fine, though the reader may not get what they expected * from the EPROM. 
*/ ret = read_length(dd, seg_base + seg_offset, to_copy, buffer + ncopied); if (ret) goto done; ncopied += to_copy; /* set up for next segment */ seg_offset = footer->oprom_size; seg_base += SEG_SIZE; } /* success */ ret = 0; *data = buffer; *size = entry->size; done: kfree(table_buffer); if (ret) kfree(buffer); return ret; } /* * Read the platform configuration file from the EPROM. * * On success, an allocated buffer containing the data and its size are * returned. It is up to the caller to free this buffer. * * Return value: * 0 - success * -ENXIO - no EPROM is available * -EBUSY - not able to acquire access to the EPROM * -ENOENT - no recognizable file written * -ENOMEM - buffer could not be allocated * -EINVAL - invalid EPROM contentents found */ int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size) { u32 directory[EP_PAGE_DWORDS]; /* aligned buffer */ int ret; if (!dd->eprom_available) return -ENXIO; ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); if (ret) return -EBUSY; /* read the last page of the segment for the EPROM format magic */ ret = read_length(dd, SEG_SIZE - EP_PAGE_SIZE, EP_PAGE_SIZE, directory); if (ret) goto done; /* last dword of the segment contains a magic value */ if (directory[EP_PAGE_DWORDS - 1] == FOOTER_MAGIC) { /* segment format */ ret = read_segment_platform_config(dd, directory, data, size); } else { /* partition format */ ret = read_partition_platform_config(dd, data, size); } done: release_chip_resource(dd, CR_EPROM); return ret; }
linux-master
drivers/infiniband/hw/hfi1/eprom.c
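read_length() in eprom.c above services an arbitrary byte range with fixed-size page reads: a partial leading page, a run of whole aligned pages, then a partial trailing page. The sketch below reproduces that head/body/tail split against an in-memory array standing in for the EPROM; the tiny PAGE_SIZE and the eeprom[] array are illustrative assumptions, not values from the driver.

/*
 * Sketch of the head/body/tail page split used by read_length() above.
 * The "device" only supports aligned, page-sized reads.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 8			/* tiny page for demonstration */
#define PAGE_MASK (PAGE_SIZE - 1)

static unsigned char eeprom[64];	/* stand-in for the EPROM contents */

/* read one aligned page; all callers pass a page-aligned offset */
static void read_page(unsigned int offset, unsigned char *out)
{
	memcpy(out, &eeprom[offset], PAGE_SIZE);
}

static void read_length(unsigned int start, unsigned int len,
			unsigned char *dest)
{
	unsigned char page[PAGE_SIZE];
	unsigned int head = start & PAGE_MASK;

	if (head) {				/* partial leading page */
		unsigned int bytes = PAGE_SIZE - head;

		read_page(start & ~PAGE_MASK, page);
		if (len <= bytes) {
			memcpy(dest, page + head, len);
			return;
		}
		memcpy(dest, page + head, bytes);
		start += bytes; dest += bytes; len -= bytes;
	}
	while (len >= PAGE_SIZE) {		/* whole aligned pages */
		read_page(start, dest);
		start += PAGE_SIZE; dest += PAGE_SIZE; len -= PAGE_SIZE;
	}
	if (len) {				/* partial trailing page */
		read_page(start, page);
		memcpy(dest, page, len);
	}
}

int main(void)
{
	unsigned char out[16];
	unsigned int i;

	for (i = 0; i < sizeof(eeprom); i++)
		eeprom[i] = (unsigned char)i;

	read_length(5, 13, out);		/* unaligned start and length */
	for (i = 0; i < 13; i++)
		printf("%u ", out[i]);
	printf("\n");				/* prints 5 .. 17 */
	return 0;
}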
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright(c) 2018 Intel Corporation. * */ #include "iowait.h" #include "trace_iowait.h" /* 1 priority == 16 starve_cnt */ #define IOWAIT_PRIORITY_STARVE_SHIFT 4 void iowait_set_flag(struct iowait *wait, u32 flag) { trace_hfi1_iowait_set(wait, flag); set_bit(flag, &wait->flags); } bool iowait_flag_set(struct iowait *wait, u32 flag) { return test_bit(flag, &wait->flags); } inline void iowait_clear_flag(struct iowait *wait, u32 flag) { trace_hfi1_iowait_clear(wait, flag); clear_bit(flag, &wait->flags); } /* * iowait_init() - initialize wait structure * @wait: wait struct to initialize * @tx_limit: limit for overflow queuing * @func: restart function for workqueue * @sleep: sleep function for no space * @resume: wakeup function for no space * * This function initializes the iowait * structure embedded in the QP or PQ. * */ void iowait_init(struct iowait *wait, u32 tx_limit, void (*func)(struct work_struct *work), void (*tidfunc)(struct work_struct *work), int (*sleep)(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *tx, uint seq, bool pkts_sent), void (*wakeup)(struct iowait *wait, int reason), void (*sdma_drained)(struct iowait *wait), void (*init_priority)(struct iowait *wait)) { int i; wait->count = 0; INIT_LIST_HEAD(&wait->list); init_waitqueue_head(&wait->wait_dma); init_waitqueue_head(&wait->wait_pio); atomic_set(&wait->sdma_busy, 0); atomic_set(&wait->pio_busy, 0); wait->tx_limit = tx_limit; wait->sleep = sleep; wait->wakeup = wakeup; wait->sdma_drained = sdma_drained; wait->init_priority = init_priority; wait->flags = 0; for (i = 0; i < IOWAIT_SES; i++) { wait->wait[i].iow = wait; INIT_LIST_HEAD(&wait->wait[i].tx_head); if (i == IOWAIT_IB_SE) INIT_WORK(&wait->wait[i].iowork, func); else INIT_WORK(&wait->wait[i].iowork, tidfunc); } } /** * iowait_cancel_work - cancel all work in iowait * @w: the iowait struct */ void iowait_cancel_work(struct iowait *w) { cancel_work_sync(&iowait_get_ib_work(w)->iowork); /* Make sure that the iowork for TID RDMA is used */ if (iowait_get_tid_work(w)->iowork.func) cancel_work_sync(&iowait_get_tid_work(w)->iowork); } /** * iowait_set_work_flag - set work flag based on leg * @w: the iowait work struct */ int iowait_set_work_flag(struct iowait_work *w) { if (w == &w->iow->wait[IOWAIT_IB_SE]) { iowait_set_flag(w->iow, IOWAIT_PENDING_IB); return IOWAIT_IB_SE; } iowait_set_flag(w->iow, IOWAIT_PENDING_TID); return IOWAIT_TID_SE; } /** * iowait_priority_update_top - update the top priority entry * @w: the iowait struct * @top: a pointer to the top priority entry * @idx: the index of the current iowait in an array * @top_idx: the array index for the iowait entry that has the top priority * * This function is called to compare the priority of a given * iowait with the given top priority entry. The top index will * be returned. */ uint iowait_priority_update_top(struct iowait *w, struct iowait *top, uint idx, uint top_idx) { u8 cnt, tcnt; /* Convert priority into starve_cnt and compare the total.*/ cnt = (w->priority << IOWAIT_PRIORITY_STARVE_SHIFT) + w->starved_cnt; tcnt = (top->priority << IOWAIT_PRIORITY_STARVE_SHIFT) + top->starved_cnt; if (cnt > tcnt) return idx; else return top_idx; }
linux-master
drivers/infiniband/hw/hfi1/iowait.c
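iowait_priority_update_top() above folds a configured priority into the starvation count with a fixed shift (one priority step counts as sixteen starve counts) so a single integer comparison picks the next entry to service. A standalone sketch of that shift-and-compare selection follows; struct waiter and pick_top() are invented names, and unlike the driver's u8 arithmetic the sketch uses unsigned int so the combined weight cannot wrap.

/*
 * Sketch of the priority-to-starve-count scaling used by
 * iowait_priority_update_top() above.
 */
#include <stdio.h>

#define PRIORITY_STARVE_SHIFT 4	/* 1 priority == 16 starve_cnt */

struct waiter {
	unsigned char priority;
	unsigned char starved_cnt;
};

/* return the index of the waiter with the higher combined weight */
static unsigned int pick_top(const struct waiter *w, unsigned int idx,
			     const struct waiter *top, unsigned int top_idx)
{
	unsigned int cnt, tcnt;

	cnt  = ((unsigned int)w->priority << PRIORITY_STARVE_SHIFT) +
	       w->starved_cnt;
	tcnt = ((unsigned int)top->priority << PRIORITY_STARVE_SHIFT) +
	       top->starved_cnt;

	return cnt > tcnt ? idx : top_idx;
}

int main(void)
{
	struct waiter w[3] = {
		{ .priority = 0, .starved_cnt = 20 },	/* starving, low prio */
		{ .priority = 2, .starved_cnt = 1 },	/* high prio, fresh */
		{ .priority = 1, .starved_cnt = 3 },
	};
	unsigned int i, top = 0;

	for (i = 1; i < 3; i++)
		top = pick_top(&w[i], i, &w[top], top);

	printf("service waiter %u first\n", top);	/* prints 1 */
	return 0;
}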
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2017 Intel Corporation. */ #include <linux/firmware.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/crc32.h> #include "hfi.h" #include "trace.h" /* * Make it easy to toggle firmware file name and if it gets loaded by * editing the following. This may be something we do while in development * but not necessarily something a user would ever need to use. */ #define DEFAULT_FW_8051_NAME_FPGA "hfi_dc8051.bin" #define DEFAULT_FW_8051_NAME_ASIC "hfi1_dc8051.fw" #define DEFAULT_FW_FABRIC_NAME "hfi1_fabric.fw" #define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw" #define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw" #define ALT_FW_8051_NAME_ASIC "hfi1_dc8051_d.fw" #define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw" #define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw" #define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw" MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC); MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME); MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME); MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME); static uint fw_8051_load = 1; static uint fw_fabric_serdes_load = 1; static uint fw_pcie_serdes_load = 1; static uint fw_sbus_load = 1; /* Firmware file names get set in hfi1_firmware_init() based on the above */ static char *fw_8051_name; static char *fw_fabric_serdes_name; static char *fw_sbus_name; static char *fw_pcie_serdes_name; #define SBUS_MAX_POLL_COUNT 100 #define SBUS_COUNTER(reg, name) \ (((reg) >> ASIC_STS_SBUS_COUNTERS_##name##_CNT_SHIFT) & \ ASIC_STS_SBUS_COUNTERS_##name##_CNT_MASK) /* * Firmware security header. */ struct css_header { u32 module_type; u32 header_len; u32 header_version; u32 module_id; u32 module_vendor; u32 date; /* BCD yyyymmdd */ u32 size; /* in DWORDs */ u32 key_size; /* in DWORDs */ u32 modulus_size; /* in DWORDs */ u32 exponent_size; /* in DWORDs */ u32 reserved[22]; }; /* expected field values */ #define CSS_MODULE_TYPE 0x00000006 #define CSS_HEADER_LEN 0x000000a1 #define CSS_HEADER_VERSION 0x00010000 #define CSS_MODULE_VENDOR 0x00008086 #define KEY_SIZE 256 #define MU_SIZE 8 #define EXPONENT_SIZE 4 /* size of platform configuration partition */ #define MAX_PLATFORM_CONFIG_FILE_SIZE 4096 /* size of file of plaform configuration encoded in format version 4 */ #define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528 /* the file itself */ struct firmware_file { struct css_header css_header; u8 modulus[KEY_SIZE]; u8 exponent[EXPONENT_SIZE]; u8 signature[KEY_SIZE]; u8 firmware[]; }; struct augmented_firmware_file { struct css_header css_header; u8 modulus[KEY_SIZE]; u8 exponent[EXPONENT_SIZE]; u8 signature[KEY_SIZE]; u8 r2[KEY_SIZE]; u8 mu[MU_SIZE]; u8 firmware[]; }; /* augmented file size difference */ #define AUGMENT_SIZE (sizeof(struct augmented_firmware_file) - \ sizeof(struct firmware_file)) struct firmware_details { /* Linux core piece */ const struct firmware *fw; struct css_header *css_header; u8 *firmware_ptr; /* pointer to binary data */ u32 firmware_len; /* length in bytes */ u8 *modulus; /* pointer to the modulus */ u8 *exponent; /* pointer to the exponent */ u8 *signature; /* pointer to the signature */ u8 *r2; /* pointer to r2 */ u8 *mu; /* pointer to mu */ struct augmented_firmware_file dummy_header; }; /* * The mutex protects fw_state, fw_err, and all of the firmware_details * variables. 
*/ static DEFINE_MUTEX(fw_mutex); enum fw_state { FW_EMPTY, FW_TRY, FW_FINAL, FW_ERR }; static enum fw_state fw_state = FW_EMPTY; static int fw_err; static struct firmware_details fw_8051; static struct firmware_details fw_fabric; static struct firmware_details fw_pcie; static struct firmware_details fw_sbus; /* flags for turn_off_spicos() */ #define SPICO_SBUS 0x1 #define SPICO_FABRIC 0x2 #define ENABLE_SPICO_SMASK 0x1 /* security block commands */ #define RSA_CMD_INIT 0x1 #define RSA_CMD_START 0x2 /* security block status */ #define RSA_STATUS_IDLE 0x0 #define RSA_STATUS_ACTIVE 0x1 #define RSA_STATUS_DONE 0x2 #define RSA_STATUS_FAILED 0x3 /* RSA engine timeout, in ms */ #define RSA_ENGINE_TIMEOUT 100 /* ms */ /* hardware mutex timeout, in ms */ #define HM_TIMEOUT 10 /* ms */ /* 8051 memory access timeout, in us */ #define DC8051_ACCESS_TIMEOUT 100 /* us */ /* the number of fabric SerDes on the SBus */ #define NUM_FABRIC_SERDES 4 /* ASIC_STS_SBUS_RESULT.RESULT_CODE value */ #define SBUS_READ_COMPLETE 0x4 /* SBus fabric SerDes addresses, one set per HFI */ static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = { { 0x01, 0x02, 0x03, 0x04 }, { 0x28, 0x29, 0x2a, 0x2b } }; /* SBus PCIe SerDes addresses, one set per HFI */ static const u8 pcie_serdes_addrs[2][NUM_PCIE_SERDES] = { { 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26 }, { 0x2f, 0x31, 0x33, 0x35, 0x37, 0x39, 0x3b, 0x3d, 0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d } }; /* SBus PCIe PCS addresses, one set per HFI */ const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES] = { { 0x09, 0x0b, 0x0d, 0x0f, 0x11, 0x13, 0x15, 0x17, 0x19, 0x1b, 0x1d, 0x1f, 0x21, 0x23, 0x25, 0x27 }, { 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e } }; /* SBus fabric SerDes broadcast addresses, one per HFI */ static const u8 fabric_serdes_broadcast[2] = { 0xe4, 0xe5 }; static const u8 all_fabric_serdes_broadcast = 0xe1; /* SBus PCIe SerDes broadcast addresses, one per HFI */ const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 }; static const u8 all_pcie_serdes_broadcast = 0xe0; static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = { 0, SYSTEM_TABLE_MAX, PORT_TABLE_MAX, RX_PRESET_TABLE_MAX, TX_PRESET_TABLE_MAX, QSFP_ATTEN_TABLE_MAX, VARIABLE_SETTINGS_TABLE_MAX }; /* forwards */ static void dispose_one_firmware(struct firmware_details *fdet); static int load_fabric_serdes_firmware(struct hfi1_devdata *dd, struct firmware_details *fdet); static void dump_fw_version(struct hfi1_devdata *dd); /* * Read a single 64-bit value from 8051 data memory. * * Expects: * o caller to have already set up data read, no auto increment * o caller to turn off read enable when finished * * The address argument is a byte offset. Bits 0:2 in the address are * ignored - i.e. the hardware will always do aligned 8-byte reads as if * the lower bits are zero. * * Return 0 on success, -ENXIO on a read error (timeout). 
*/ static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result) { u64 reg; int count; /* step 1: set the address, clear enable */ reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK) << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT; write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg); /* step 2: enable */ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK); /* wait until ACCESS_COMPLETED is set */ count = 0; while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS) & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK) == 0) { count++; if (count > DC8051_ACCESS_TIMEOUT) { dd_dev_err(dd, "timeout reading 8051 data\n"); return -ENXIO; } ndelay(10); } /* gather the data */ *result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA); return 0; } /* * Read 8051 data starting at addr, for len bytes. Will read in 8-byte chunks. * Return 0 on success, -errno on error. */ int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result) { unsigned long flags; u32 done; int ret = 0; spin_lock_irqsave(&dd->dc8051_memlock, flags); /* data read set-up, no auto-increment */ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0); for (done = 0; done < len; addr += 8, done += 8, result++) { ret = __read_8051_data(dd, addr, result); if (ret) break; } /* turn off read enable */ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0); spin_unlock_irqrestore(&dd->dc8051_memlock, flags); return ret; } /* * Write data or code to the 8051 code or data RAM. */ static int write_8051(struct hfi1_devdata *dd, int code, u32 start, const u8 *data, u32 len) { u64 reg; u32 offset; int aligned, count; /* check alignment */ aligned = ((unsigned long)data & 0x7) == 0; /* write set-up */ reg = (code ? DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK : 0ull) | DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK; write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg); reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK) << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT) | DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK; write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg); /* write */ for (offset = 0; offset < len; offset += 8) { int bytes = len - offset; if (bytes < 8) { reg = 0; memcpy(&reg, &data[offset], bytes); } else if (aligned) { reg = *(u64 *)&data[offset]; } else { memcpy(&reg, &data[offset], 8); } write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg); /* wait until ACCESS_COMPLETED is set */ count = 0; while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS) & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK) == 0) { count++; if (count > DC8051_ACCESS_TIMEOUT) { dd_dev_err(dd, "timeout writing 8051 data\n"); return -ENXIO; } udelay(1); } } /* turn off write access, auto increment (also sets to data access) */ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0); write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0); return 0; } /* return 0 if values match, non-zero and complain otherwise */ static int invalid_header(struct hfi1_devdata *dd, const char *what, u32 actual, u32 expected) { if (actual == expected) return 0; dd_dev_err(dd, "invalid firmware header field %s: expected 0x%x, actual 0x%x\n", what, expected, actual); return 1; } /* * Verify that the static fields in the CSS header match. 
*/ static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css) { /* verify CSS header fields (most sizes are in DW, so add /4) */ if (invalid_header(dd, "module_type", css->module_type, CSS_MODULE_TYPE) || invalid_header(dd, "header_len", css->header_len, (sizeof(struct firmware_file) / 4)) || invalid_header(dd, "header_version", css->header_version, CSS_HEADER_VERSION) || invalid_header(dd, "module_vendor", css->module_vendor, CSS_MODULE_VENDOR) || invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) || invalid_header(dd, "modulus_size", css->modulus_size, KEY_SIZE / 4) || invalid_header(dd, "exponent_size", css->exponent_size, EXPONENT_SIZE / 4)) { return -EINVAL; } return 0; } /* * Make sure there are at least some bytes after the prefix. */ static int payload_check(struct hfi1_devdata *dd, const char *name, long file_size, long prefix_size) { /* make sure we have some payload */ if (prefix_size >= file_size) { dd_dev_err(dd, "firmware \"%s\", size %ld, must be larger than %ld bytes\n", name, file_size, prefix_size); return -EINVAL; } return 0; } /* * Request the firmware from the system. Extract the pieces and fill in * fdet. If successful, the caller will need to call dispose_one_firmware(). * Returns 0 on success, -ERRNO on error. */ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name, struct firmware_details *fdet) { struct css_header *css; int ret; memset(fdet, 0, sizeof(*fdet)); ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev); if (ret) { dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n", name, ret); return ret; } /* verify the firmware */ if (fdet->fw->size < sizeof(struct css_header)) { dd_dev_err(dd, "firmware \"%s\" is too small\n", name); ret = -EINVAL; goto done; } css = (struct css_header *)fdet->fw->data; hfi1_cdbg(FIRMWARE, "Firmware %s details:", name); hfi1_cdbg(FIRMWARE, "file size: 0x%lx bytes", fdet->fw->size); hfi1_cdbg(FIRMWARE, "CSS structure:"); hfi1_cdbg(FIRMWARE, " module_type 0x%x", css->module_type); hfi1_cdbg(FIRMWARE, " header_len 0x%03x (0x%03x bytes)", css->header_len, 4 * css->header_len); hfi1_cdbg(FIRMWARE, " header_version 0x%x", css->header_version); hfi1_cdbg(FIRMWARE, " module_id 0x%x", css->module_id); hfi1_cdbg(FIRMWARE, " module_vendor 0x%x", css->module_vendor); hfi1_cdbg(FIRMWARE, " date 0x%x", css->date); hfi1_cdbg(FIRMWARE, " size 0x%03x (0x%03x bytes)", css->size, 4 * css->size); hfi1_cdbg(FIRMWARE, " key_size 0x%03x (0x%03x bytes)", css->key_size, 4 * css->key_size); hfi1_cdbg(FIRMWARE, " modulus_size 0x%03x (0x%03x bytes)", css->modulus_size, 4 * css->modulus_size); hfi1_cdbg(FIRMWARE, " exponent_size 0x%03x (0x%03x bytes)", css->exponent_size, 4 * css->exponent_size); hfi1_cdbg(FIRMWARE, "firmware size: 0x%lx bytes", fdet->fw->size - sizeof(struct firmware_file)); /* * If the file does not have a valid CSS header, fail. * Otherwise, check the CSS size field for an expected size. * The augmented file has r2 and mu inserted after the header * was generated, so there will be a known difference between * the CSS header size and the actual file size. Use this * difference to identify an augmented file. * * Note: css->size is in DWORDs, multiply by 4 to get bytes. 
*/ ret = verify_css_header(dd, css); if (ret) { dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name); } else if ((css->size * 4) == fdet->fw->size) { /* non-augmented firmware file */ struct firmware_file *ff = (struct firmware_file *) fdet->fw->data; /* make sure there are bytes in the payload */ ret = payload_check(dd, name, fdet->fw->size, sizeof(struct firmware_file)); if (ret == 0) { fdet->css_header = css; fdet->modulus = ff->modulus; fdet->exponent = ff->exponent; fdet->signature = ff->signature; fdet->r2 = fdet->dummy_header.r2; /* use dummy space */ fdet->mu = fdet->dummy_header.mu; /* use dummy space */ fdet->firmware_ptr = ff->firmware; fdet->firmware_len = fdet->fw->size - sizeof(struct firmware_file); /* * Header does not include r2 and mu - generate here. * For now, fail. */ dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n"); ret = -EINVAL; } } else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) { /* augmented firmware file */ struct augmented_firmware_file *aff = (struct augmented_firmware_file *)fdet->fw->data; /* make sure there are bytes in the payload */ ret = payload_check(dd, name, fdet->fw->size, sizeof(struct augmented_firmware_file)); if (ret == 0) { fdet->css_header = css; fdet->modulus = aff->modulus; fdet->exponent = aff->exponent; fdet->signature = aff->signature; fdet->r2 = aff->r2; fdet->mu = aff->mu; fdet->firmware_ptr = aff->firmware; fdet->firmware_len = fdet->fw->size - sizeof(struct augmented_firmware_file); } } else { /* css->size check failed */ dd_dev_err(dd, "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n", fdet->fw->size / 4, (fdet->fw->size - AUGMENT_SIZE) / 4, css->size); ret = -EINVAL; } done: /* if returning an error, clean up after ourselves */ if (ret) dispose_one_firmware(fdet); return ret; } static void dispose_one_firmware(struct firmware_details *fdet) { release_firmware(fdet->fw); /* erase all previous information */ memset(fdet, 0, sizeof(*fdet)); } /* * Obtain the 4 firmwares from the OS. All must be obtained at once or not * at all. If called with the firmware state in FW_TRY, use alternate names. * On exit, this routine will have set the firmware state to one of FW_TRY, * FW_FINAL, or FW_ERR. * * Must be holding fw_mutex. */ static void __obtain_firmware(struct hfi1_devdata *dd) { int err = 0; if (fw_state == FW_FINAL) /* nothing more to obtain */ return; if (fw_state == FW_ERR) /* already in error */ return; /* fw_state is FW_EMPTY or FW_TRY */ retry: if (fw_state == FW_TRY) { /* * We tried the original and it failed. Move to the * alternate. */ dd_dev_warn(dd, "using alternate firmware names\n"); /* * Let others run. Some systems, when missing firmware, does * something that holds for 30 seconds. If we do that twice * in a row it triggers task blocked warning. */ cond_resched(); if (fw_8051_load) dispose_one_firmware(&fw_8051); if (fw_fabric_serdes_load) dispose_one_firmware(&fw_fabric); if (fw_sbus_load) dispose_one_firmware(&fw_sbus); if (fw_pcie_serdes_load) dispose_one_firmware(&fw_pcie); fw_8051_name = ALT_FW_8051_NAME_ASIC; fw_fabric_serdes_name = ALT_FW_FABRIC_NAME; fw_sbus_name = ALT_FW_SBUS_NAME; fw_pcie_serdes_name = ALT_FW_PCIE_NAME; /* * Add a delay before obtaining and loading debug firmware. * Authorization will fail if the delay between firmware * authorization events is shorter than 50us. Add 100us to * make a delay time safe. 
*/ usleep_range(100, 120); } if (fw_sbus_load) { err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus); if (err) goto done; } if (fw_pcie_serdes_load) { err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie); if (err) goto done; } if (fw_fabric_serdes_load) { err = obtain_one_firmware(dd, fw_fabric_serdes_name, &fw_fabric); if (err) goto done; } if (fw_8051_load) { err = obtain_one_firmware(dd, fw_8051_name, &fw_8051); if (err) goto done; } done: if (err) { /* oops, had problems obtaining a firmware */ if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) { /* retry with alternate (RTL only) */ fw_state = FW_TRY; goto retry; } dd_dev_err(dd, "unable to obtain working firmware\n"); fw_state = FW_ERR; fw_err = -ENOENT; } else { /* success */ if (fw_state == FW_EMPTY && dd->icode != ICODE_FUNCTIONAL_SIMULATOR) fw_state = FW_TRY; /* may retry later */ else fw_state = FW_FINAL; /* cannot try again */ } } /* * Called by all HFIs when loading their firmware - i.e. device probe time. * The first one will do the actual firmware load. Use a mutex to resolve * any possible race condition. * * The call to this routine cannot be moved to driver load because the kernel * call request_firmware() requires a device which is only available after * the first device probe. */ static int obtain_firmware(struct hfi1_devdata *dd) { unsigned long timeout; mutex_lock(&fw_mutex); /* 40s delay due to long delay on missing firmware on some systems */ timeout = jiffies + msecs_to_jiffies(40000); while (fw_state == FW_TRY) { /* * Another device is trying the firmware. Wait until it * decides what works (or not). */ if (time_after(jiffies, timeout)) { /* waited too long */ dd_dev_err(dd, "Timeout waiting for firmware try"); fw_state = FW_ERR; fw_err = -ETIMEDOUT; break; } mutex_unlock(&fw_mutex); msleep(20); /* arbitrary delay */ mutex_lock(&fw_mutex); } /* not in FW_TRY state */ /* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */ if (fw_state == FW_EMPTY) __obtain_firmware(dd); mutex_unlock(&fw_mutex); return fw_err; } /* * Called when the driver unloads. The timing is asymmetric with its * counterpart, obtain_firmware(). If called at device remove time, * then it is conceivable that another device could probe while the * firmware is being disposed. The mutexes can be moved to do that * safely, but then the firmware would be requested from the OS multiple * times. * * No mutex is needed as the driver is unloading and there cannot be any * other callers. */ void dispose_firmware(void) { dispose_one_firmware(&fw_8051); dispose_one_firmware(&fw_fabric); dispose_one_firmware(&fw_pcie); dispose_one_firmware(&fw_sbus); /* retain the error state, otherwise revert to empty */ if (fw_state != FW_ERR) fw_state = FW_EMPTY; } /* * Called with the result of a firmware download. * * Return 1 to retry loading the firmware, 0 to stop. */ static int retry_firmware(struct hfi1_devdata *dd, int load_result) { int retry; mutex_lock(&fw_mutex); if (load_result == 0) { /* * The load succeeded, so expect all others to do the same. * Do not retry again. */ if (fw_state == FW_TRY) fw_state = FW_FINAL; retry = 0; /* do NOT retry */ } else if (fw_state == FW_TRY) { /* load failed, obtain alternate firmware */ __obtain_firmware(dd); retry = (fw_state == FW_FINAL); } else { /* else in FW_FINAL or FW_ERR, no retry in either case */ retry = 0; } mutex_unlock(&fw_mutex); return retry; } /* * Write a block of data to a given array CSR. All calls will be in * multiples of 8 bytes. 
*/ static void write_rsa_data(struct hfi1_devdata *dd, int what, const u8 *data, int nbytes) { int qw_size = nbytes / 8; int i; if (((unsigned long)data & 0x7) == 0) { /* aligned */ u64 *ptr = (u64 *)data; for (i = 0; i < qw_size; i++, ptr++) write_csr(dd, what + (8 * i), *ptr); } else { /* not aligned */ for (i = 0; i < qw_size; i++, data += 8) { u64 value; memcpy(&value, data, 8); write_csr(dd, what + (8 * i), value); } } } /* * Write a block of data to a given CSR as a stream of writes. All calls will * be in multiples of 8 bytes. */ static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what, const u8 *data, int nbytes) { u64 *ptr = (u64 *)data; int qw_size = nbytes / 8; for (; qw_size > 0; qw_size--, ptr++) write_csr(dd, what, *ptr); } /* * Download the signature and start the RSA mechanism. Wait for * RSA_ENGINE_TIMEOUT before giving up. */ static int run_rsa(struct hfi1_devdata *dd, const char *who, const u8 *signature) { unsigned long timeout; u64 reg; u32 status; int ret = 0; /* write the signature */ write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE); /* initialize RSA */ write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT); /* * Make sure the engine is idle and insert a delay between the two * writes to MISC_CFG_RSA_CMD. */ status = (read_csr(dd, MISC_CFG_FW_CTRL) & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK) >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT; if (status != RSA_STATUS_IDLE) { dd_dev_err(dd, "%s security engine not idle - giving up\n", who); return -EBUSY; } /* start RSA */ write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START); /* * Look for the result. * * The RSA engine is hooked up to two MISC errors. The driver * masks these errors as they do not respond to the standard * error "clear down" mechanism. Look for these errors here and * clear them when possible. This routine will exit with the * errors of the current run still set. * * MISC_FW_AUTH_FAILED_ERR * Firmware authorization failed. This can be cleared by * re-initializing the RSA engine, then clearing the status bit. * Do not re-init the RSA angine immediately after a successful * run - this will reset the current authorization. * * MISC_KEY_MISMATCH_ERR * Key does not match. The only way to clear this is to load * a matching key then clear the status bit. If this error * is raised, it will persist outside of this routine until a * matching key is loaded. */ timeout = msecs_to_jiffies(RSA_ENGINE_TIMEOUT) + jiffies; while (1) { status = (read_csr(dd, MISC_CFG_FW_CTRL) & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK) >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT; if (status == RSA_STATUS_IDLE) { /* should not happen */ dd_dev_err(dd, "%s firmware security bad idle state\n", who); ret = -EINVAL; break; } else if (status == RSA_STATUS_DONE) { /* finished successfully */ break; } else if (status == RSA_STATUS_FAILED) { /* finished unsuccessfully */ ret = -EINVAL; break; } /* else still active */ if (time_after(jiffies, timeout)) { /* * Timed out while active. We can't reset the engine * if it is stuck active, but run through the * error code to see what error bits are set. */ dd_dev_err(dd, "%s firmware security time out\n", who); ret = -ETIMEDOUT; break; } msleep(20); } /* * Arrive here on success or failure. Clear all RSA engine * errors. All current errors will stick - the RSA logic is keeping * error high. All previous errors will clear - the RSA logic * is not keeping the error high. 
*/ write_csr(dd, MISC_ERR_CLEAR, MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK | MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK); /* * All that is left are the current errors. Print warnings on * authorization failure details, if any. Firmware authorization * can be retried, so these are only warnings. */ reg = read_csr(dd, MISC_ERR_STATUS); if (ret) { if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK) dd_dev_warn(dd, "%s firmware authorization failed\n", who); if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK) dd_dev_warn(dd, "%s firmware key mismatch\n", who); } return ret; } static void load_security_variables(struct hfi1_devdata *dd, struct firmware_details *fdet) { /* Security variables a. Write the modulus */ write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE); /* Security variables b. Write the r2 */ write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE); /* Security variables c. Write the mu */ write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE); /* Security variables d. Write the header */ write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD, (u8 *)fdet->css_header, sizeof(struct css_header)); } /* return the 8051 firmware state */ static inline u32 get_firmware_state(struct hfi1_devdata *dd) { u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE); return (reg >> DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT) & DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK; } /* * Wait until the firmware is up and ready to take host requests. * Return 0 on success, -ETIMEDOUT on timeout. */ int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout) { unsigned long timeout; /* in the simulator, the fake 8051 is always ready */ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) return 0; timeout = msecs_to_jiffies(mstimeout) + jiffies; while (1) { if (get_firmware_state(dd) == 0xa0) /* ready */ return 0; if (time_after(jiffies, timeout)) /* timed out */ return -ETIMEDOUT; usleep_range(1950, 2050); /* sleep 2ms-ish */ } } /* * Load the 8051 firmware. */ static int load_8051_firmware(struct hfi1_devdata *dd, struct firmware_details *fdet) { u64 reg; int ret; u8 ver_major; u8 ver_minor; u8 ver_patch; /* * DC Reset sequence * Load DC 8051 firmware */ /* * DC reset step 1: Reset DC8051 */ reg = DC_DC8051_CFG_RST_M8051W_SMASK | DC_DC8051_CFG_RST_CRAM_SMASK | DC_DC8051_CFG_RST_DRAM_SMASK | DC_DC8051_CFG_RST_IRAM_SMASK | DC_DC8051_CFG_RST_SFR_SMASK; write_csr(dd, DC_DC8051_CFG_RST, reg); /* * DC reset step 2 (optional): Load 8051 data memory with link * configuration */ /* * DC reset step 3: Load DC8051 firmware */ /* release all but the core reset */ reg = DC_DC8051_CFG_RST_M8051W_SMASK; write_csr(dd, DC_DC8051_CFG_RST, reg); /* Firmware load step 1 */ load_security_variables(dd, fdet); /* * Firmware load step 2. Clear MISC_CFG_FW_CTRL.FW_8051_LOADED */ write_csr(dd, MISC_CFG_FW_CTRL, 0); /* Firmware load steps 3-5 */ ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr, fdet->firmware_len); if (ret) return ret; /* * DC reset step 4. Host starts the DC8051 firmware */ /* * Firmware load step 6. Set MISC_CFG_FW_CTRL.FW_8051_LOADED */ write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK); /* Firmware load steps 7-10 */ ret = run_rsa(dd, "8051", fdet->signature); if (ret) return ret; /* clear all reset bits, releasing the 8051 */ write_csr(dd, DC_DC8051_CFG_RST, 0ull); /* * DC reset step 5. Wait for firmware to be ready to accept host * requests. 
*/ ret = wait_fm_ready(dd, TIMEOUT_8051_START); if (ret) { /* timed out */ dd_dev_err(dd, "8051 start timeout, current state 0x%x\n", get_firmware_state(dd)); return -ETIMEDOUT; } read_misc_status(dd, &ver_major, &ver_minor, &ver_patch); dd_dev_info(dd, "8051 firmware version %d.%d.%d\n", (int)ver_major, (int)ver_minor, (int)ver_patch); dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch); ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "Failed to set host interface version, return 0x%x\n", ret); return -EIO; } return 0; } /* * Write the SBus request register * * No need for masking - the arguments are sized exactly. */ void sbus_request(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr, u8 command, u32 data_in) { write_csr(dd, ASIC_CFG_SBUS_REQUEST, ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) | ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) | ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) | ((u64)receiver_addr << ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT)); } /* * Read a value from the SBus. * * Requires the caller to be in fast mode */ static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr, u32 data_in) { u64 reg; int retries; int success = 0; u32 result = 0; u32 result_code = 0; sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in); for (retries = 0; retries < 100; retries++) { usleep_range(1000, 1200); /* arbitrary */ reg = read_csr(dd, ASIC_STS_SBUS_RESULT); result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT) & ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK; if (result_code != SBUS_READ_COMPLETE) continue; success = 1; result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT) & ASIC_STS_SBUS_RESULT_DATA_OUT_MASK; break; } if (!success) { dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__, result_code); } return result; } /* * Turn off the SBus and fabric serdes spicos. * * + Must be called with Sbus fast mode turned on. * + Must be called after fabric serdes broadcast is set up. * + Must be called before the 8051 is loaded - assumes 8051 is not loaded * when using MISC_CFG_FW_CTRL. */ static void turn_off_spicos(struct hfi1_devdata *dd, int flags) { /* only needed on A0 */ if (!is_ax(dd)) return; dd_dev_info(dd, "Turning off spicos:%s%s\n", flags & SPICO_SBUS ? " SBus" : "", flags & SPICO_FABRIC ? " fabric" : ""); write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK); /* disable SBus spico */ if (flags & SPICO_SBUS) sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01, WRITE_SBUS_RECEIVER, 0x00000040); /* disable the fabric serdes spicos */ if (flags & SPICO_FABRIC) sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id], 0x07, WRITE_SBUS_RECEIVER, 0x00000000); write_csr(dd, MISC_CFG_FW_CTRL, 0); } /* * Reset all of the fabric serdes for this HFI in preparation to take the * link to Polling. * * To do a reset, we need to write to the serdes registers. Unfortunately, * the fabric serdes download to the other HFI on the ASIC will have turned * off the firmware validation on this HFI. This means we can't write to the * registers to reset the serdes. Work around this by performing a complete * re-download and validation of the fabric serdes firmware. This, as a * by-product, will reset the serdes. NOTE: the re-download requires that * the 8051 be in the Offline state. I.e. not actively trying to use the * serdes. This routine is called at the point where the link is Offline and * is getting ready to go to Polling. 
*/ void fabric_serdes_reset(struct hfi1_devdata *dd) { int ret; if (!fw_fabric_serdes_load) return; ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); if (ret) { dd_dev_err(dd, "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n"); return; } set_sbus_fast_mode(dd); if (is_ax(dd)) { /* A0 serdes do not work with a re-download */ u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* place SerDes in reset and disable SPICO */ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011); /* wait 100 refclk cycles @ 156.25MHz => 640ns */ udelay(1); /* remove SerDes reset */ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010); /* turn SPICO enable on */ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002); } else { turn_off_spicos(dd, SPICO_FABRIC); /* * No need for firmware retry - what to download has already * been decided. * No need to pay attention to the load return - the only * failure is a validation failure, which has already been * checked by the initial download. */ (void)load_fabric_serdes_firmware(dd, &fw_fabric); } clear_sbus_fast_mode(dd); release_chip_resource(dd, CR_SBUS); } /* Access to the SBus in this routine should probably be serialized */ int sbus_request_slow(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr, u8 command, u32 data_in) { u64 reg, count = 0; /* make sure fast mode is clear */ clear_sbus_fast_mode(dd); sbus_request(dd, receiver_addr, data_addr, command, data_in); write_csr(dd, ASIC_CFG_SBUS_EXECUTE, ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK); /* Wait for both DONE and RCV_DATA_VALID to go high */ reg = read_csr(dd, ASIC_STS_SBUS_RESULT); while (!((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) && (reg & ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK))) { if (count++ >= SBUS_MAX_POLL_COUNT) { u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS); /* * If the loop has timed out, we are OK if DONE bit * is set and RCV_DATA_VALID and EXECUTE counters * are the same. If not, we cannot proceed. 
*/ if ((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) && (SBUS_COUNTER(counts, RCV_DATA_VALID) == SBUS_COUNTER(counts, EXECUTE))) break; return -ETIMEDOUT; } udelay(1); reg = read_csr(dd, ASIC_STS_SBUS_RESULT); } count = 0; write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0); /* Wait for DONE to clear after EXECUTE is cleared */ reg = read_csr(dd, ASIC_STS_SBUS_RESULT); while (reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) { if (count++ >= SBUS_MAX_POLL_COUNT) return -ETIME; udelay(1); reg = read_csr(dd, ASIC_STS_SBUS_RESULT); } return 0; } static int load_fabric_serdes_firmware(struct hfi1_devdata *dd, struct firmware_details *fdet) { int i, err; const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */ dd_dev_info(dd, "Downloading fabric firmware\n"); /* step 1: load security variables */ load_security_variables(dd, fdet); /* step 2: place SerDes in reset and disable SPICO */ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011); /* wait 100 refclk cycles @ 156.25MHz => 640ns */ udelay(1); /* step 3: remove SerDes reset */ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010); /* step 4: assert IMEM override */ sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000); /* step 5: download SerDes machine code */ for (i = 0; i < fdet->firmware_len; i += 4) { sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER, *(u32 *)&fdet->firmware_ptr[i]); } /* step 6: IMEM override off */ sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000); /* step 7: turn ECC on */ sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000); /* steps 8-11: run the RSA engine */ err = run_rsa(dd, "fabric serdes", fdet->signature); if (err) return err; /* step 12: turn SPICO enable on */ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002); /* step 13: enable core hardware interrupts */ sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000); return 0; } static int load_sbus_firmware(struct hfi1_devdata *dd, struct firmware_details *fdet) { int i, err; const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */ dd_dev_info(dd, "Downloading SBus firmware\n"); /* step 1: load security variables */ load_security_variables(dd, fdet); /* step 2: place SPICO into reset and enable off */ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0); /* step 3: remove reset, enable off, IMEM_CNTRL_EN on */ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240); /* step 4: set starting IMEM address for burst download */ sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000); /* step 5: download the SBus Master machine code */ for (i = 0; i < fdet->firmware_len; i += 4) { sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER, *(u32 *)&fdet->firmware_ptr[i]); } /* step 6: set IMEM_CNTL_EN off */ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040); /* step 7: turn ECC on */ sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000); /* steps 8-11: run the RSA engine */ err = run_rsa(dd, "SBus", fdet->signature); if (err) return err; /* step 12: set SPICO_ENABLE on */ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140); return 0; } static int load_pcie_serdes_firmware(struct hfi1_devdata *dd, struct firmware_details *fdet) { int i; const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */ dd_dev_info(dd, "Downloading PCIe firmware\n"); /* step 1: load security variables */ load_security_variables(dd, fdet); /* step 2: assert single step (halts the SBus Master spico) */ sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001); /* step 3: enable XDMEM access */ sbus_request(dd, ra, 0x01, 
WRITE_SBUS_RECEIVER, 0x00000d40); /* step 4: load firmware into SBus Master XDMEM */ /* * NOTE: the dmem address, write_en, and wdata are all pre-packed, * we only need to pick up the bytes and write them */ for (i = 0; i < fdet->firmware_len; i += 4) { sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER, *(u32 *)&fdet->firmware_ptr[i]); } /* step 5: disable XDMEM access */ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140); /* step 6: allow SBus Spico to run */ sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000); /* * steps 7-11: run RSA, if it succeeds, firmware is available to * be swapped */ return run_rsa(dd, "PCIe serdes", fdet->signature); } /* * Set the given broadcast values on the given list of devices. */ static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2, const u8 *addrs, int count) { while (--count >= 0) { /* * Set BROADCAST_GROUP_1 and BROADCAST_GROUP_2, leave * defaults for everything else. Do not read-modify-write, * per instruction from the manufacturer. * * Register 0xfd: * bits what * ----- --------------------------------- * 0 IGNORE_BROADCAST (default 0) * 11:4 BROADCAST_GROUP_1 (default 0xff) * 23:16 BROADCAST_GROUP_2 (default 0xff) */ sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER, (u32)bg1 << 4 | (u32)bg2 << 16); } } int acquire_hw_mutex(struct hfi1_devdata *dd) { unsigned long timeout; int try = 0; u8 mask = 1 << dd->hfi1_id; u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX); if (user == mask) { dd_dev_info(dd, "Hardware mutex already acquired, mutex mask %u\n", (u32)mask); return 0; } retry: timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies; while (1) { write_csr(dd, ASIC_CFG_MUTEX, mask); user = (u8)read_csr(dd, ASIC_CFG_MUTEX); if (user == mask) return 0; /* success */ if (time_after(jiffies, timeout)) break; /* timed out */ msleep(20); } /* timed out */ dd_dev_err(dd, "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n", (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up"); if (try == 0) { /* break mutex and retry */ write_csr(dd, ASIC_CFG_MUTEX, 0); try++; goto retry; } return -EBUSY; } void release_hw_mutex(struct hfi1_devdata *dd) { u8 mask = 1 << dd->hfi1_id; u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX); if (user != mask) dd_dev_warn(dd, "Unable to release hardware mutex, mutex mask %u, my mask %u\n", (u32)user, (u32)mask); else write_csr(dd, ASIC_CFG_MUTEX, 0); } /* return the given resource bit(s) as a mask for the given HFI */ static inline u64 resource_mask(u32 hfi1_id, u32 resource) { return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0); } static void fail_mutex_acquire_message(struct hfi1_devdata *dd, const char *func) { dd_dev_err(dd, "%s: hardware mutex stuck - suggest rebooting the machine\n", func); } /* * Acquire access to a chip resource. * * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire failed. 
*/ static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource) { u64 scratch0, all_bits, my_bit; int ret; if (resource & CR_DYN_MASK) { /* a dynamic resource is in use if either HFI has set the bit */ if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0 && (resource & (CR_I2C1 | CR_I2C2))) { /* discrete devices must serialize across both chains */ all_bits = resource_mask(0, CR_I2C1 | CR_I2C2) | resource_mask(1, CR_I2C1 | CR_I2C2); } else { all_bits = resource_mask(0, resource) | resource_mask(1, resource); } my_bit = resource_mask(dd->hfi1_id, resource); } else { /* non-dynamic resources are not split between HFIs */ all_bits = resource; my_bit = resource; } /* lock against other callers within the driver wanting a resource */ mutex_lock(&dd->asic_data->asic_resource_mutex); ret = acquire_hw_mutex(dd); if (ret) { fail_mutex_acquire_message(dd, __func__); ret = -EIO; goto done; } scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); if (scratch0 & all_bits) { ret = -EBUSY; } else { write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit); /* force write to be visible to other HFI on another OS */ (void)read_csr(dd, ASIC_CFG_SCRATCH); } release_hw_mutex(dd); done: mutex_unlock(&dd->asic_data->asic_resource_mutex); return ret; } /* * Acquire access to a chip resource, wait up to mswait milliseconds for * the resource to become available. * * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex * acquire failed. */ int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait) { unsigned long timeout; int ret; timeout = jiffies + msecs_to_jiffies(mswait); while (1) { ret = __acquire_chip_resource(dd, resource); if (ret != -EBUSY) return ret; /* resource is busy, check our timeout */ if (time_after_eq(jiffies, timeout)) return -EBUSY; usleep_range(80, 120); /* arbitrary delay */ } } /* * Release access to a chip resource */ void release_chip_resource(struct hfi1_devdata *dd, u32 resource) { u64 scratch0, bit; /* only dynamic resources should ever be cleared */ if (!(resource & CR_DYN_MASK)) { dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__, resource); return; } bit = resource_mask(dd->hfi1_id, resource); /* lock against other callers within the driver wanting a resource */ mutex_lock(&dd->asic_data->asic_resource_mutex); if (acquire_hw_mutex(dd)) { fail_mutex_acquire_message(dd, __func__); goto done; } scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); if ((scratch0 & bit) != 0) { scratch0 &= ~bit; write_csr(dd, ASIC_CFG_SCRATCH, scratch0); /* force write to be visible to other HFI on another OS */ (void)read_csr(dd, ASIC_CFG_SCRATCH); } else { dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n", __func__, dd->hfi1_id, resource); } release_hw_mutex(dd); done: mutex_unlock(&dd->asic_data->asic_resource_mutex); } /* * Return true if resource is set, false otherwise. Print a warning * if not set and a function is supplied. 
*/ bool check_chip_resource(struct hfi1_devdata *dd, u32 resource, const char *func) { u64 scratch0, bit; if (resource & CR_DYN_MASK) bit = resource_mask(dd->hfi1_id, resource); else bit = resource; scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); if ((scratch0 & bit) == 0) { if (func) dd_dev_warn(dd, "%s: id %d, resource 0x%x, not acquired!\n", func, dd->hfi1_id, resource); return false; } return true; } static void clear_chip_resources(struct hfi1_devdata *dd, const char *func) { u64 scratch0; /* lock against other callers within the driver wanting a resource */ mutex_lock(&dd->asic_data->asic_resource_mutex); if (acquire_hw_mutex(dd)) { fail_mutex_acquire_message(dd, func); goto done; } /* clear all dynamic access bits for this HFI */ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK); write_csr(dd, ASIC_CFG_SCRATCH, scratch0); /* force write to be visible to other HFI on another OS */ (void)read_csr(dd, ASIC_CFG_SCRATCH); release_hw_mutex(dd); done: mutex_unlock(&dd->asic_data->asic_resource_mutex); } void init_chip_resources(struct hfi1_devdata *dd) { /* clear any holds left by us */ clear_chip_resources(dd, __func__); } void finish_chip_resources(struct hfi1_devdata *dd) { /* clear any holds left by us */ clear_chip_resources(dd, __func__); } void set_sbus_fast_mode(struct hfi1_devdata *dd) { write_csr(dd, ASIC_CFG_SBUS_EXECUTE, ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK); } void clear_sbus_fast_mode(struct hfi1_devdata *dd) { u64 reg, count = 0; reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS); while (SBUS_COUNTER(reg, EXECUTE) != SBUS_COUNTER(reg, RCV_DATA_VALID)) { if (count++ >= SBUS_MAX_POLL_COUNT) break; udelay(1); reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS); } write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0); } int load_firmware(struct hfi1_devdata *dd) { int ret; if (fw_fabric_serdes_load) { ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); if (ret) return ret; set_sbus_fast_mode(dd); set_serdes_broadcast(dd, all_fabric_serdes_broadcast, fabric_serdes_broadcast[dd->hfi1_id], fabric_serdes_addrs[dd->hfi1_id], NUM_FABRIC_SERDES); turn_off_spicos(dd, SPICO_FABRIC); do { ret = load_fabric_serdes_firmware(dd, &fw_fabric); } while (retry_firmware(dd, ret)); clear_sbus_fast_mode(dd); release_chip_resource(dd, CR_SBUS); if (ret) return ret; } if (fw_8051_load) { do { ret = load_8051_firmware(dd, &fw_8051); } while (retry_firmware(dd, ret)); if (ret) return ret; } dump_fw_version(dd); return 0; } int hfi1_firmware_init(struct hfi1_devdata *dd) { /* only RTL can use these */ if (dd->icode != ICODE_RTL_SILICON) { fw_fabric_serdes_load = 0; fw_pcie_serdes_load = 0; fw_sbus_load = 0; } /* no 8051 or QSFP on simulator */ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) fw_8051_load = 0; if (!fw_8051_name) { if (dd->icode == ICODE_RTL_SILICON) fw_8051_name = DEFAULT_FW_8051_NAME_ASIC; else fw_8051_name = DEFAULT_FW_8051_NAME_FPGA; } if (!fw_fabric_serdes_name) fw_fabric_serdes_name = DEFAULT_FW_FABRIC_NAME; if (!fw_sbus_name) fw_sbus_name = DEFAULT_FW_SBUS_NAME; if (!fw_pcie_serdes_name) fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME; return obtain_firmware(dd); } /* * This function is a helper function for parse_platform_config(...) and * does not check for validity of the platform configuration cache * (because we know it is invalid as we are building up the cache). 
* As such, this should not be called from anywhere other than * parse_platform_config */ static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table) { u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask; struct platform_config_cache *pcfgcache = &dd->pcfg_cache; if (!system_table) return -EINVAL; meta_ver_meta = *(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata + SYSTEM_TABLE_META_VERSION); mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1); ver_start = meta_ver_meta & mask; meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT; mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1); ver_len = meta_ver_meta & mask; ver_start /= 8; meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1); if (meta_ver < 4) { dd_dev_info( dd, "%s:Please update platform config\n", __func__); return -EINVAL; } return 0; } int parse_platform_config(struct hfi1_devdata *dd) { struct platform_config_cache *pcfgcache = &dd->pcfg_cache; struct hfi1_pportdata *ppd = dd->pport; u32 *ptr = NULL; u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0; u32 record_idx = 0, table_type = 0, table_length_dwords = 0; int ret = -EINVAL; /* assume failure */ /* * For integrated devices that did not fall back to the default file, * the SI tuning information for active channels is acquired from the * scratch register bitmap, thus there is no platform config to parse. * Skip parsing in these situations. */ if (ppd->config_from_scratch) return 0; if (!dd->platform_config.data) { dd_dev_err(dd, "%s: Missing config file\n", __func__); ret = -EINVAL; goto bail; } ptr = (u32 *)dd->platform_config.data; magic_num = *ptr; ptr++; if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) { dd_dev_err(dd, "%s: Bad config file\n", __func__); ret = -EINVAL; goto bail; } /* Field is file size in DWORDs */ file_length = (*ptr) * 4; /* * Length can't be larger than partition size. Assume platform * config format version 4 is being used. Interpret the file size * field as header instead by not moving the pointer. */ if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) { dd_dev_info(dd, "%s:File length out of bounds, using alternative format\n", __func__); file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE; } else { ptr++; } if (file_length > dd->platform_config.size) { dd_dev_info(dd, "%s:File claims to be larger than read size\n", __func__); ret = -EINVAL; goto bail; } else if (file_length < dd->platform_config.size) { dd_dev_info(dd, "%s:File claims to be smaller than read size, continuing\n", __func__); } /* exactly equal, perfection */ /* * In both cases where we proceed, using the self-reported file length * is the safer option. In case of old format a predefined value is * being used. 
*/ while (ptr < (u32 *)(dd->platform_config.data + file_length)) { header1 = *ptr; header2 = *(ptr + 1); if (header1 != ~header2) { dd_dev_err(dd, "%s: Failed validation at offset %ld\n", __func__, (ptr - (u32 *) dd->platform_config.data)); ret = -EINVAL; goto bail; } record_idx = *ptr & ((1 << PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS) - 1); table_length_dwords = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT) & ((1 << PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS) - 1); table_type = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT) & ((1 << PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS) - 1); /* Done with this set of headers */ ptr += 2; if (record_idx) { /* data table */ switch (table_type) { case PLATFORM_CONFIG_SYSTEM_TABLE: pcfgcache->config_tables[table_type].num_table = 1; ret = check_meta_version(dd, ptr); if (ret) goto bail; break; case PLATFORM_CONFIG_PORT_TABLE: pcfgcache->config_tables[table_type].num_table = 2; break; case PLATFORM_CONFIG_RX_PRESET_TABLE: case PLATFORM_CONFIG_TX_PRESET_TABLE: case PLATFORM_CONFIG_QSFP_ATTEN_TABLE: case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE: pcfgcache->config_tables[table_type].num_table = table_length_dwords; break; default: dd_dev_err(dd, "%s: Unknown data table %d, offset %ld\n", __func__, table_type, (ptr - (u32 *) dd->platform_config.data)); ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table = ptr; } else { /* metadata table */ switch (table_type) { case PLATFORM_CONFIG_SYSTEM_TABLE: case PLATFORM_CONFIG_PORT_TABLE: case PLATFORM_CONFIG_RX_PRESET_TABLE: case PLATFORM_CONFIG_TX_PRESET_TABLE: case PLATFORM_CONFIG_QSFP_ATTEN_TABLE: case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE: break; default: dd_dev_err(dd, "%s: Unknown meta table %d, offset %ld\n", __func__, table_type, (ptr - (u32 *)dd->platform_config.data)); ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table_metadata = ptr; } /* Calculate and check table crc */ crc = crc32_le(~(u32)0, (unsigned char const *)ptr, (table_length_dwords * 4)); crc ^= ~(u32)0; /* Jump the table */ ptr += table_length_dwords; if (crc != *ptr) { dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n", __func__, (ptr - (u32 *)dd->platform_config.data)); ret = -EINVAL; goto bail; } /* Jump the CRC DWORD */ ptr++; } pcfgcache->cache_valid = 1; return 0; bail: memset(pcfgcache, 0, sizeof(struct platform_config_cache)); return ret; } static void get_integrated_platform_config_field( struct hfi1_devdata *dd, enum platform_config_table_type_encoding table_type, int field_index, u32 *data) { struct hfi1_pportdata *ppd = dd->pport; u8 *cache = ppd->qsfp_info.cache; u32 tx_preset = 0; switch (table_type) { case PLATFORM_CONFIG_SYSTEM_TABLE: if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) *data = ppd->max_power_class; else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G) *data = ppd->default_atten; break; case PLATFORM_CONFIG_PORT_TABLE: if (field_index == PORT_TABLE_PORT_TYPE) *data = ppd->port_type; else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G) *data = ppd->local_atten; else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G) *data = ppd->remote_atten; break; case PLATFORM_CONFIG_RX_PRESET_TABLE: if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY) *data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >> QSFP_RX_CDR_APPLY_SHIFT; else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY) *data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >> QSFP_RX_EMP_APPLY_SHIFT; else if (field_index == 
RX_PRESET_TABLE_QSFP_RX_AMP_APPLY) *data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >> QSFP_RX_AMP_APPLY_SHIFT; else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR) *data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >> QSFP_RX_CDR_SHIFT; else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP) *data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >> QSFP_RX_EMP_SHIFT; else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP) *data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >> QSFP_RX_AMP_SHIFT; break; case PLATFORM_CONFIG_TX_PRESET_TABLE: if (cache[QSFP_EQ_INFO_OFFS] & 0x4) tx_preset = ppd->tx_preset_eq; else tx_preset = ppd->tx_preset_noeq; if (field_index == TX_PRESET_TABLE_PRECUR) *data = (tx_preset & TX_PRECUR_SMASK) >> TX_PRECUR_SHIFT; else if (field_index == TX_PRESET_TABLE_ATTN) *data = (tx_preset & TX_ATTN_SMASK) >> TX_ATTN_SHIFT; else if (field_index == TX_PRESET_TABLE_POSTCUR) *data = (tx_preset & TX_POSTCUR_SMASK) >> TX_POSTCUR_SHIFT; else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY) *data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >> QSFP_TX_CDR_APPLY_SHIFT; else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY) *data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >> QSFP_TX_EQ_APPLY_SHIFT; else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR) *data = (tx_preset & QSFP_TX_CDR_SMASK) >> QSFP_TX_CDR_SHIFT; else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ) *data = (tx_preset & QSFP_TX_EQ_SMASK) >> QSFP_TX_EQ_SHIFT; break; case PLATFORM_CONFIG_QSFP_ATTEN_TABLE: case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE: default: break; } } static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table, int field, u32 *field_len_bits, u32 *field_start_bits) { struct platform_config_cache *pcfgcache = &dd->pcfg_cache; u32 *src_ptr = NULL; if (!pcfgcache->cache_valid) return -EINVAL; switch (table) { case PLATFORM_CONFIG_SYSTEM_TABLE: case PLATFORM_CONFIG_PORT_TABLE: case PLATFORM_CONFIG_RX_PRESET_TABLE: case PLATFORM_CONFIG_TX_PRESET_TABLE: case PLATFORM_CONFIG_QSFP_ATTEN_TABLE: case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE: if (field && field < platform_config_table_limits[table]) src_ptr = pcfgcache->config_tables[table].table_metadata + field; break; default: dd_dev_info(dd, "%s: Unknown table\n", __func__); break; } if (!src_ptr) return -EINVAL; if (field_start_bits) *field_start_bits = *src_ptr & ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1); if (field_len_bits) *field_len_bits = (*src_ptr >> METADATA_TABLE_FIELD_LEN_SHIFT) & ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1); return 0; } /* This is the central interface to getting data out of the platform config * file. It depends on parse_platform_config() having populated the * platform_config_cache in hfi1_devdata, and checks the cache_valid member to * validate the sanity of the cache. * * The non-obvious parameters: * @table_index: Acts as a look up key into which instance of the tables the * relevant field is fetched from. * * This applies to the data tables that have multiple instances. The port table * is an exception to this rule as each HFI only has one port and thus the * relevant table can be distinguished by hfi_id. * * @data: pointer to memory that will be populated with the field requested. * @len: length of memory pointed by @data in bytes. 
*/ int get_platform_config_field(struct hfi1_devdata *dd, enum platform_config_table_type_encoding table_type, int table_index, int field_index, u32 *data, u32 len) { int ret = 0, wlen = 0, seek = 0; u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL; struct platform_config_cache *pcfgcache = &dd->pcfg_cache; struct hfi1_pportdata *ppd = dd->pport; if (data) memset(data, 0, len); else return -EINVAL; if (ppd->config_from_scratch) { /* * Use saved configuration from ppd for integrated platforms */ get_integrated_platform_config_field(dd, table_type, field_index, data); return 0; } ret = get_platform_fw_field_metadata(dd, table_type, field_index, &field_len_bits, &field_start_bits); if (ret) return -EINVAL; /* Convert length to bits */ len *= 8; /* Our metadata function checked cache_valid and field_index for us */ switch (table_type) { case PLATFORM_CONFIG_SYSTEM_TABLE: src_ptr = pcfgcache->config_tables[table_type].table; if (field_index != SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) { if (len < field_len_bits) return -EINVAL; seek = field_start_bits / 8; wlen = field_len_bits / 8; src_ptr = (u32 *)((u8 *)src_ptr + seek); /* * We expect the field to be byte aligned and whole byte * lengths if we are here */ memcpy(data, src_ptr, wlen); return 0; } break; case PLATFORM_CONFIG_PORT_TABLE: /* Port table is 4 DWORDS */ src_ptr = dd->hfi1_id ? pcfgcache->config_tables[table_type].table + 4 : pcfgcache->config_tables[table_type].table; break; case PLATFORM_CONFIG_RX_PRESET_TABLE: case PLATFORM_CONFIG_TX_PRESET_TABLE: case PLATFORM_CONFIG_QSFP_ATTEN_TABLE: case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE: src_ptr = pcfgcache->config_tables[table_type].table; if (table_index < pcfgcache->config_tables[table_type].num_table) src_ptr += table_index; else src_ptr = NULL; break; default: dd_dev_info(dd, "%s: Unknown table\n", __func__); break; } if (!src_ptr || len < field_len_bits) return -EINVAL; src_ptr += (field_start_bits / 32); *data = (*src_ptr >> (field_start_bits % 32)) & ((1 << field_len_bits) - 1); return 0; } /* * Download the firmware needed for the Gen3 PCIe SerDes. An update * to the SBus firmware is needed before updating the PCIe firmware. * * Note: caller must be holding the SBus resource. */ int load_pcie_firmware(struct hfi1_devdata *dd) { int ret = 0; /* both firmware loads below use the SBus */ set_sbus_fast_mode(dd); if (fw_sbus_load) { turn_off_spicos(dd, SPICO_SBUS); do { ret = load_sbus_firmware(dd, &fw_sbus); } while (retry_firmware(dd, ret)); if (ret) goto done; } if (fw_pcie_serdes_load) { dd_dev_info(dd, "Setting PCIe SerDes broadcast\n"); set_serdes_broadcast(dd, all_pcie_serdes_broadcast, pcie_serdes_broadcast[dd->hfi1_id], pcie_serdes_addrs[dd->hfi1_id], NUM_PCIE_SERDES); do { ret = load_pcie_serdes_firmware(dd, &fw_pcie); } while (retry_firmware(dd, ret)); if (ret) goto done; } done: clear_sbus_fast_mode(dd); return ret; } /* * Read the GUID from the hardware, store it in dd. 
*/ void read_guid(struct hfi1_devdata *dd) { /* Take the DC out of reset to get a valid GUID value */ write_csr(dd, CCE_DC_CTRL, 0); (void)read_csr(dd, CCE_DC_CTRL); dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID); dd_dev_info(dd, "GUID %llx", (unsigned long long)dd->base_guid); } /* read and display firmware version info */ static void dump_fw_version(struct hfi1_devdata *dd) { u32 pcie_vers[NUM_PCIE_SERDES]; u32 fabric_vers[NUM_FABRIC_SERDES]; u32 sbus_vers; int i; int all_same; int ret; u8 rcv_addr; ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); if (ret) { dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n"); return; } /* set fast mode */ set_sbus_fast_mode(dd); /* read version for SBus Master */ sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0); sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1); /* wait for interrupt to be processed */ usleep_range(10000, 11000); sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1); dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers); /* read version for PCIe SerDes */ all_same = 1; pcie_vers[0] = 0; for (i = 0; i < NUM_PCIE_SERDES; i++) { rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i]; sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0); /* wait for interrupt to be processed */ usleep_range(10000, 11000); pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0); if (i > 0 && pcie_vers[0] != pcie_vers[i]) all_same = 0; } if (all_same) { dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n", pcie_vers[0]); } else { dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n"); for (i = 0; i < NUM_PCIE_SERDES; i++) { dd_dev_info(dd, "PCIe SerDes lane %d firmware version 0x%x\n", i, pcie_vers[i]); } } /* read version for fabric SerDes */ all_same = 1; fabric_vers[0] = 0; for (i = 0; i < NUM_FABRIC_SERDES; i++) { rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i]; sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0); /* wait for interrupt to be processed */ usleep_range(10000, 11000); fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0); if (i > 0 && fabric_vers[0] != fabric_vers[i]) all_same = 0; } if (all_same) { dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n", fabric_vers[0]); } else { dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n"); for (i = 0; i < NUM_FABRIC_SERDES; i++) { dd_dev_info(dd, "Fabric SerDes lane %d firmware version 0x%x\n", i, fabric_vers[i]); } } clear_sbus_fast_mode(dd); release_chip_resource(dd, CR_SBUS); }
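/*
 * Editor's sketch (not driver code): the per-HFI dynamic-resource bit
 * arithmetic behind acquire_chip_resource()/__acquire_chip_resource()
 * above, which dump_fw_version() just used to claim CR_SBUS. Each
 * dynamic resource owns one bit per HFI in the shared ASIC_CFG_SCRATCH
 * register; HFI 1's copy sits CR_DYN_SHIFT bits above HFI 0's, so
 * "busy" means either copy is set. DEMO_CR_DYN_SHIFT and DEMO_CR_SBUS
 * are made-up stand-ins for the driver's CR_DYN_SHIFT/CR_SBUS values,
 * and the real code additionally holds the hardware mutex and
 * serializes the I2C chains on discrete devices. Builds stand-alone
 * with any C compiler.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CR_DYN_SHIFT 16
#define DEMO_CR_SBUS 0x4u

static uint64_t demo_resource_mask(unsigned int hfi1_id, uint32_t resource)
{
	/* mirrors resource_mask(): HFI 1 uses the shifted copy of the bits */
	return (uint64_t)resource << (hfi1_id ? DEMO_CR_DYN_SHIFT : 0);
}

/* try to claim a resource in a fake scratch word; 0 on success, -1 if busy */
static int demo_try_acquire(uint64_t *scratch, unsigned int hfi1_id,
			    uint32_t resource)
{
	uint64_t all_bits = demo_resource_mask(0, resource) |
			    demo_resource_mask(1, resource);
	uint64_t my_bit = demo_resource_mask(hfi1_id, resource);

	if (*scratch & all_bits)	/* either HFI already holds it */
		return -1;
	*scratch |= my_bit;		/* claim it for this HFI */
	return 0;
}

int main(void)
{
	uint64_t scratch = 0;

	printf("HFI0 claims SBus: %d\n",
	       demo_try_acquire(&scratch, 0, DEMO_CR_SBUS));
	printf("HFI1 claims SBus: %d\n",
	       demo_try_acquire(&scratch, 1, DEMO_CR_SBUS));
	printf("scratch now 0x%llx\n", (unsigned long long)scratch);
	return 0;
}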
linux-master
drivers/infiniband/hw/hfi1/firmware.c
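/*
 * Editor's sketch (not driver code): the metadata-driven field decode
 * used by get_platform_config_field() in firmware.c above. Each field's
 * metadata DWORD packs a start-bit offset and a bit length; the value
 * is read from the table DWORD containing the start bit, shifted down
 * and masked. The widths/shift below (15, 16, 16) are assumptions
 * standing in for METADATA_TABLE_FIELD_START_LEN_BITS,
 * METADATA_TABLE_FIELD_LEN_SHIFT and METADATA_TABLE_FIELD_LEN_LEN_BITS,
 * whose real values live in a header not shown here. Builds
 * stand-alone with any C compiler.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_META_START_BITS 15	/* width of the start-bit field */
#define DEMO_META_LEN_SHIFT  16	/* where the length field begins */
#define DEMO_META_LEN_BITS   16	/* width of the length field */

static uint32_t demo_get_field(const uint32_t *table, uint32_t meta)
{
	uint32_t start = meta & ((1u << DEMO_META_START_BITS) - 1);
	uint32_t len = (meta >> DEMO_META_LEN_SHIFT) &
		       ((1u << DEMO_META_LEN_BITS) - 1);
	/* same arithmetic as the driver: index the DWORD holding the
	 * field, shift its start bit down to bit 0, then mask to the
	 * field length (assumes len < 32, as sub-DWORD fields are)
	 */
	const uint32_t *src = table + start / 32;

	return (*src >> (start % 32)) & ((1u << len) - 1);
}

int main(void)
{
	uint32_t table[2] = { 0x2d0, 0 };	/* value 0x2d in bits 11:4 */
	uint32_t meta = 4 | (8u << DEMO_META_LEN_SHIFT); /* start 4, len 8 */

	printf("decoded field = 0x%x\n", demo_get_field(table, meta));
	return 0;
}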
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. * Copyright(c) 2021 Cornelis Networks. */ /* * This file contains all of the code that is specific to the HFI chip */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/module.h> #include "hfi.h" #include "trace.h" #include "mad.h" #include "pio.h" #include "sdma.h" #include "eprom.h" #include "efivar.h" #include "platform.h" #include "aspm.h" #include "affinity.h" #include "debugfs.h" #include "fault.h" #include "netdev.h" uint num_vls = HFI1_MAX_VLS_SUPPORTED; module_param(num_vls, uint, S_IRUGO); MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)"); /* * Default time to aggregate two 10K packets from the idle state * (timer not running). The timer starts at the end of the first packet, * so only the time for one 10K packet and header plus a bit extra is needed. * 10 * 1024 + 64 header byte = 10304 byte * 10304 byte / 12.5 GB/s = 824.32ns */ uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */ module_param(rcv_intr_timeout, uint, S_IRUGO); MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns"); uint rcv_intr_count = 16; /* same as qib */ module_param(rcv_intr_count, uint, S_IRUGO); MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count"); ushort link_crc_mask = SUPPORTED_CRCS; module_param(link_crc_mask, ushort, S_IRUGO); MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link"); uint loopback; module_param_named(loopback, loopback, uint, S_IRUGO); MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable"); /* Other driver tunables */ uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/ static ushort crc_14b_sideband = 1; static uint use_flr = 1; uint quick_linkup; /* skip LNI */ struct flag_table { u64 flag; /* the flag */ char *str; /* description string */ u16 extra; /* extra information */ u16 unused0; u32 unused1; }; /* str must be a string constant */ #define FLAG_ENTRY(str, extra, flag) {flag, str, extra} #define FLAG_ENTRY0(str, flag) {flag, str, 0} /* Send Error Consequences */ #define SEC_WRITE_DROPPED 0x1 #define SEC_PACKET_DROPPED 0x2 #define SEC_SC_HALTED 0x4 /* per-context only */ #define SEC_SPC_FREEZE 0x8 /* per-HFI only */ #define DEFAULT_KRCVQS 2 #define MIN_KERNEL_KCTXTS 2 #define FIRST_KERNEL_KCTXT 1 /* * RSM instance allocation * 0 - User Fecn Handling * 1 - Vnic * 2 - AIP * 3 - Verbs */ #define RSM_INS_FECN 0 #define RSM_INS_VNIC 1 #define RSM_INS_AIP 2 #define RSM_INS_VERBS 3 /* Bit offset into the GUID which carries HFI id information */ #define GUID_HFI_INDEX_SHIFT 39 /* extract the emulation revision */ #define emulator_rev(dd) ((dd)->irev >> 8) /* parallel and serial emulation versions are 3 and 4 respectively */ #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3) #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4) /* RSM fields for Verbs */ /* packet type */ #define IB_PACKET_TYPE 2ull #define QW_SHIFT 6ull /* QPN[7..1] */ #define QPN_WIDTH 7ull /* LRH.BTH: QW 0, OFFSET 48 - for match */ #define LRH_BTH_QW 0ull #define LRH_BTH_BIT_OFFSET 48ull #define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off)) #define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET) #define LRH_BTH_SELECT #define LRH_BTH_MASK 3ull #define LRH_BTH_VALUE 2ull /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */ #define LRH_SC_QW 0ull #define LRH_SC_BIT_OFFSET 56ull #define LRH_SC_OFFSET(off) ((LRH_SC_QW << 
QW_SHIFT) | (off)) #define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET) #define LRH_SC_MASK 128ull #define LRH_SC_VALUE 0ull /* SC[n..0] QW 0, OFFSET 60 - for select */ #define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull)) /* QPN[m+n:1] QW 1, OFFSET 1 */ #define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull)) /* RSM fields for AIP */ /* LRH.BTH above is reused for this rule */ /* BTH.DESTQP: QW 1, OFFSET 16 for match */ #define BTH_DESTQP_QW 1ull #define BTH_DESTQP_BIT_OFFSET 16ull #define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off)) #define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET) #define BTH_DESTQP_MASK 0xFFull #define BTH_DESTQP_VALUE 0x81ull /* DETH.SQPN: QW 1 Offset 56 for select */ /* We use 8 most significant Soure QPN bits as entropy fpr AIP */ #define DETH_AIP_SQPN_QW 3ull #define DETH_AIP_SQPN_BIT_OFFSET 56ull #define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off)) #define DETH_AIP_SQPN_SELECT_OFFSET \ DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET) /* RSM fields for Vnic */ /* L2_TYPE: QW 0, OFFSET 61 - for match */ #define L2_TYPE_QW 0ull #define L2_TYPE_BIT_OFFSET 61ull #define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off)) #define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET) #define L2_TYPE_MASK 3ull #define L2_16B_VALUE 2ull /* L4_TYPE QW 1, OFFSET 0 - for match */ #define L4_TYPE_QW 1ull #define L4_TYPE_BIT_OFFSET 0ull #define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off)) #define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET) #define L4_16B_TYPE_MASK 0xFFull #define L4_16B_ETH_VALUE 0x78ull /* 16B VESWID - for select */ #define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull)) /* 16B ENTROPY - for select */ #define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull)) /* defines to build power on SC2VL table */ #define SC2VL_VAL( \ num, \ sc0, sc0val, \ sc1, sc1val, \ sc2, sc2val, \ sc3, sc3val, \ sc4, sc4val, \ sc5, sc5val, \ sc6, sc6val, \ sc7, sc7val) \ ( \ ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \ ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \ ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \ ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \ ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \ ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \ ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \ ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \ ) #define DC_SC_VL_VAL( \ range, \ e0, e0val, \ e1, e1val, \ e2, e2val, \ e3, e3val, \ e4, e4val, \ e5, e5val, \ e6, e6val, \ e7, e7val, \ e8, e8val, \ e9, e9val, \ e10, e10val, \ e11, e11val, \ e12, e12val, \ e13, e13val, \ e14, e14val, \ e15, e15val) \ ( \ ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \ ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \ ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \ ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \ ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \ ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \ ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \ ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \ ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \ ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \ ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \ ((u64)(e11val) << 
DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \ ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \ ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \ ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \ ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \ ) /* all CceStatus sub-block freeze bits */ #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \ | CCE_STATUS_RXE_FROZE_SMASK \ | CCE_STATUS_TXE_FROZE_SMASK \ | CCE_STATUS_TXE_PIO_FROZE_SMASK) /* all CceStatus sub-block TXE pause bits */ #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \ | CCE_STATUS_TXE_PAUSED_SMASK \ | CCE_STATUS_SDMA_PAUSED_SMASK) /* all CceStatus sub-block RXE pause bits */ #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL #define CNTR_32BIT_MAX 0x00000000FFFFFFFF /* * CCE Error flags. */ static struct flag_table cce_err_status_flags[] = { /* 0*/ FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK), /* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr", CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK), /* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr", CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK), /* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr", CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK), /* 4*/ FLAG_ENTRY0("CceTrgtAccessErr", CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK), /* 5*/ FLAG_ENTRY0("CceRspdDataParityErr", CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK), /* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr", CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK), /* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr", CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK), /* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr", CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK), /* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr", CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK), /*10*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr", CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK), /*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError", CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK), /*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError", CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK), /*13*/ FLAG_ENTRY0("PcicRetryMemCorErr", CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK), /*14*/ FLAG_ENTRY0("PcicRetryMemCorErr", CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK), /*15*/ FLAG_ENTRY0("PcicPostHdQCorErr", CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK), /*16*/ FLAG_ENTRY0("PcicPostHdQCorErr", CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK), /*17*/ FLAG_ENTRY0("PcicPostHdQCorErr", CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK), /*18*/ FLAG_ENTRY0("PcicCplDatQCorErr", CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK), /*19*/ FLAG_ENTRY0("PcicNPostHQParityErr", CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK), /*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr", CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK), /*21*/ FLAG_ENTRY0("PcicRetryMemUncErr", CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK), /*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr", CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK), /*23*/ FLAG_ENTRY0("PcicPostHdQUncErr", CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK), /*24*/ FLAG_ENTRY0("PcicPostDatQUncErr", CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK), /*25*/ FLAG_ENTRY0("PcicCplHdQUncErr", CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK), /*26*/ FLAG_ENTRY0("PcicCplDatQUncErr", CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK), /*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr", 
CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK), /*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr", CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK), /*29*/ FLAG_ENTRY0("PcicReceiveParityErr", CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK), /*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr", CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK), /*31*/ FLAG_ENTRY0("LATriggered", CCE_ERR_STATUS_LA_TRIGGERED_SMASK), /*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr", CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK), /*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr", CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK), /*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr", CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK), /*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr", CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK), /*36*/ FLAG_ENTRY0("CceMsixTableCorErr", CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK), /*37*/ FLAG_ENTRY0("CceMsixTableUncErr", CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK), /*38*/ FLAG_ENTRY0("CceIntMapCorErr", CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK), /*39*/ FLAG_ENTRY0("CceIntMapUncErr", CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK), /*40*/ FLAG_ENTRY0("CceMsixCsrParityErr", CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK), /*41-63 reserved*/ }; /* * Misc Error flags */ #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK static struct flag_table misc_err_status_flags[] = { /* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)), /* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)), /* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)), /* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)), /* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)), /* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)), /* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)), /* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)), /* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)), /* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)), /*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)), /*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)), /*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL)) }; /* * TXE PIO Error flags and consequences */ static struct flag_table pio_err_status_flags[] = { /* 0*/ FLAG_ENTRY("PioWriteBadCtxt", SEC_WRITE_DROPPED, SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK), /* 1*/ FLAG_ENTRY("PioWriteAddrParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK), /* 2*/ FLAG_ENTRY("PioCsrParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK), /* 3*/ FLAG_ENTRY("PioSbMemFifo0", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK), /* 4*/ FLAG_ENTRY("PioSbMemFifo1", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK), /* 5*/ FLAG_ENTRY("PioPccFifoParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK), /* 6*/ FLAG_ENTRY("PioPecFifoParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK), /* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK), /* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK), /* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK), /*10*/ FLAG_ENTRY("PioSmPktResetParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK), /*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc", SEC_SPC_FREEZE, 
SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK), /*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK), /*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor", 0, SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK), /*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor", 0, SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK), /*15*/ FLAG_ENTRY("PioCreditRetFifoParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK), /*16*/ FLAG_ENTRY("PioPpmcPblFifo", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK), /*17*/ FLAG_ENTRY("PioInitSmIn", 0, SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK), /*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK), /*19*/ FLAG_ENTRY("PioHostAddrMemUnc", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK), /*20*/ FLAG_ENTRY("PioHostAddrMemCor", 0, SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK), /*21*/ FLAG_ENTRY("PioWriteDataParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK), /*22*/ FLAG_ENTRY("PioStateMachine", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK), /*23*/ FLAG_ENTRY("PioWriteQwValidParity", SEC_WRITE_DROPPED | SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK), /*24*/ FLAG_ENTRY("PioBlockQwCountParity", SEC_WRITE_DROPPED | SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK), /*25*/ FLAG_ENTRY("PioVlfVlLenParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK), /*26*/ FLAG_ENTRY("PioVlfSopParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK), /*27*/ FLAG_ENTRY("PioVlFifoParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK), /*28*/ FLAG_ENTRY("PioPpmcBqcMemParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK), /*29*/ FLAG_ENTRY("PioPpmcSopLen", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK), /*30-31 reserved*/ /*32*/ FLAG_ENTRY("PioCurrentFreeCntParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK), /*33*/ FLAG_ENTRY("PioLastReturnedCntParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK), /*34*/ FLAG_ENTRY("PioPccSopHeadParity", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK), /*35*/ FLAG_ENTRY("PioPecSopHeadParityErr", SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK), /*36-63 reserved*/ }; /* TXE PIO errors that cause an SPC freeze */ #define ALL_PIO_FREEZE_ERR \ (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \ | 
SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \ | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK) /* * TXE SDMA Error flags */ static struct flag_table sdma_err_status_flags[] = { /* 0*/ FLAG_ENTRY0("SDmaRpyTagErr", SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK), /* 1*/ FLAG_ENTRY0("SDmaCsrParityErr", SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK), /* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr", SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK), /* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr", SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK), /*04-63 reserved*/ }; /* TXE SDMA errors that cause an SPC freeze */ #define ALL_SDMA_FREEZE_ERR \ (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \ | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \ | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK) /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */ #define PORT_DISCARD_EGRESS_ERRS \ (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \ | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \ | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK) /* * TXE Egress Error flags */ #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK static struct flag_table egress_err_status_flags[] = { /* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)), /* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)), /* 2 reserved */ /* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr", SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)), /* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)), /* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)), /* 6 reserved */ /* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr", SEES(TX_PIO_LAUNCH_INTF_PARITY)), /* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr", SEES(TX_SDMA_LAUNCH_INTF_PARITY)), /* 9-10 reserved */ /*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr", SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)), /*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)), /*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)), /*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)), /*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)), /*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr", SEES(TX_SDMA0_DISALLOWED_PACKET)), /*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr", SEES(TX_SDMA1_DISALLOWED_PACKET)), /*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr", SEES(TX_SDMA2_DISALLOWED_PACKET)), /*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr", SEES(TX_SDMA3_DISALLOWED_PACKET)), /*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr", SEES(TX_SDMA4_DISALLOWED_PACKET)), /*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr", SEES(TX_SDMA5_DISALLOWED_PACKET)), /*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr", SEES(TX_SDMA6_DISALLOWED_PACKET)), /*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr", SEES(TX_SDMA7_DISALLOWED_PACKET)), /*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr", 
SEES(TX_SDMA8_DISALLOWED_PACKET)), /*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr", SEES(TX_SDMA9_DISALLOWED_PACKET)), /*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr", SEES(TX_SDMA10_DISALLOWED_PACKET)), /*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr", SEES(TX_SDMA11_DISALLOWED_PACKET)), /*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr", SEES(TX_SDMA12_DISALLOWED_PACKET)), /*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr", SEES(TX_SDMA13_DISALLOWED_PACKET)), /*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr", SEES(TX_SDMA14_DISALLOWED_PACKET)), /*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr", SEES(TX_SDMA15_DISALLOWED_PACKET)), /*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr", SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)), /*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr", SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)), /*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr", SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)), /*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr", SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)), /*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr", SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)), /*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr", SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)), /*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr", SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)), /*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr", SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)), /*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr", SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)), /*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)), /*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)), /*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)), /*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)), /*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)), /*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)), /*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)), /*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)), /*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)), /*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)), /*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)), /*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)), /*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)), /*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)), /*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)), /*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)), /*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)), /*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)), /*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)), /*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)), /*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)), /*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr", SEES(TX_READ_SDMA_MEMORY_CSR_UNC)), /*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr", SEES(TX_READ_PIO_MEMORY_CSR_UNC)), }; /* * TXE Egress Error Info flags */ #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK static struct flag_table egress_err_info_flags[] = { /* 0*/ FLAG_ENTRY0("Reserved", 0ull), /* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)), /* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)), /* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)), /* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)), /* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)), /* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)), /* 7*/ 
FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)), /* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)), /* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)), /*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)), /*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)), /*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)), /*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)), /*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)), /*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)), /*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)), /*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)), /*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)), /*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)), /*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)), /*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)), }; /* TXE Egress errors that cause an SPC freeze */ #define ALL_TXE_EGRESS_FREEZE_ERR \ (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \ | SEES(TX_PIO_LAUNCH_INTF_PARITY) \ | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \ | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \ | SEES(TX_LAUNCH_CSR_PARITY) \ | SEES(TX_SBRD_CTL_CSR_PARITY) \ | SEES(TX_CONFIG_PARITY) \ | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \ | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \ | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \ | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \ | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \ | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \ | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \ | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \ | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \ | SEES(TX_CREDIT_RETURN_PARITY)) /* * TXE Send error flags */ #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK static struct flag_table send_err_status_flags[] = { /* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)), /* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)), /* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR)) }; /* * TXE Send Context Error flags and consequences */ static struct flag_table sc_err_status_flags[] = { /* 0*/ FLAG_ENTRY("InconsistentSop", SEC_PACKET_DROPPED | SEC_SC_HALTED, SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK), /* 1*/ FLAG_ENTRY("DisallowedPacket", SEC_PACKET_DROPPED | SEC_SC_HALTED, SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK), /* 2*/ FLAG_ENTRY("WriteCrossesBoundary", SEC_WRITE_DROPPED | SEC_SC_HALTED, SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK), /* 3*/ FLAG_ENTRY("WriteOverflow", SEC_WRITE_DROPPED | SEC_SC_HALTED, SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK), /* 4*/ FLAG_ENTRY("WriteOutOfBounds", SEC_WRITE_DROPPED | SEC_SC_HALTED, SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK), /* 5-63 reserved*/ }; /* * RXE Receive Error flags */ #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK static struct flag_table rxe_err_status_flags[] = { /* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)), /* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)), /* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)), /* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)), /* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)), /* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)), /* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)), /* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)), /* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)), /* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)), /*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", 
RXES(DMA_FLAG_UNC)), /*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)), /*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)), /*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)), /*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)), /*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)), /*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr", RXES(RBUF_LOOKUP_DES_REG_UNC_COR)), /*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)), /*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)), /*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr", RXES(RBUF_BLOCK_LIST_READ_UNC)), /*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr", RXES(RBUF_BLOCK_LIST_READ_COR)), /*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr", RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)), /*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr", RXES(RBUF_CSR_QENT_CNT_PARITY)), /*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr", RXES(RBUF_CSR_QNEXT_BUF_PARITY)), /*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr", RXES(RBUF_CSR_QVLD_BIT_PARITY)), /*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)), /*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)), /*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr", RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)), /*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)), /*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)), /*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)), /*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)), /*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)), /*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)), /*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)), /*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr", RXES(RBUF_FL_INITDONE_PARITY)), /*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr", RXES(RBUF_FL_INIT_WR_ADDR_PARITY)), /*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)), /*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)), /*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)), /*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr", RXES(LOOKUP_DES_PART1_UNC_COR)), /*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr", RXES(LOOKUP_DES_PART2_PARITY)), /*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)), /*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)), /*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)), /*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)), /*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)), /*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)), /*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)), /*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)), /*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)), /*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)), /*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)), /*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)), /*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)), /*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)), /*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)), /*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)), /*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)), /*59*/ 
FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)), /*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)), /*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)), /*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)), /*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY)) }; /* RXE errors that will trigger an SPC freeze */ #define ALL_RXE_FREEZE_ERR \ (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \ | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \ | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \ | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \ | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \ | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK) #define RXE_FREEZE_ABORT_MASK \ (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \ RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \ RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK) /* * DCC Error Flags */ #define DCCE(name) DCC_ERR_FLG_##name##_SMASK static struct flag_table dcc_err_flags[] = { FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)), FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)), FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)), FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)), FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)), FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)), FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)), FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)), FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)), FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)), 
FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)), FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)), FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)), FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)), FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)), FLAG_ENTRY0("link_err", DCCE(LINK_ERR)), FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)), FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)), FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)), FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)), FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)), FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)), FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)), FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)), FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)), FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)), FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)), FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)), FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)), FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)), FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)), FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)), FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)), FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)), FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)), FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)), FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)), FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)), FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)), FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)), FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)), FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)), FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)), FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)), FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)), FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)), }; /* * LCB error flags */ #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK static struct flag_table lcb_err_flags[] = { /* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)), /* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)), /* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)), /* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST", LCBE(ALL_LNS_FAILED_REINIT_TEST)), /* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)), /* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)), /* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)), /* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)), /* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)), /* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)), /*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)), /*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)), /*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)), /*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER", LCBE(UNEXPECTED_ROUND_TRIP_MARKER)), /*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)), /*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)), /*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)), /*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)), /*18*/ 
FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)), /*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE", LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)), /*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)), /*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)), /*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)), /*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)), /*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)), /*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)), /*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP", LCBE(RST_FOR_INCOMPLT_RND_TRIP)), /*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)), /*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE", LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)), /*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR", LCBE(REDUNDANT_FLIT_PARITY_ERR)) }; /* * DC8051 Error Flags */ #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK static struct flag_table dc8051_err_flags[] = { FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)), FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)), FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)), FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)), FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)), FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)), FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)), FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)), FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES", D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)), FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)), }; /* * DC8051 Information Error flags * * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field. */ static struct flag_table dc8051_info_err_flags[] = { FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED), FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME), FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET), FLAG_ENTRY0("Serdes internal loopback failure", FAILED_SERDES_INTERNAL_LOOPBACK), FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT), FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING), FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE), FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM), FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ), FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1), FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2), FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT), FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT), FLAG_ENTRY0("External Device Request Timeout", EXTERNAL_DEVICE_REQ_TIMEOUT), }; /* * DC8051 Information Host Information flags * * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field. 
*/ static struct flag_table dc8051_info_host_msg_flags[] = { FLAG_ENTRY0("Host request done", 0x0001), FLAG_ENTRY0("BC PWR_MGM message", 0x0002), FLAG_ENTRY0("BC SMA message", 0x0004), FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008), FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010), FLAG_ENTRY0("External device config request", 0x0020), FLAG_ENTRY0("VerifyCap all frames received", 0x0040), FLAG_ENTRY0("LinkUp achieved", 0x0080), FLAG_ENTRY0("Link going down", 0x0100), FLAG_ENTRY0("Link width downgraded", 0x0200), }; static u32 encoded_size(u32 size); static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate); static int set_physical_link_state(struct hfi1_devdata *dd, u64 state); static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management, u8 *continuous); static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z, u8 *vcu, u16 *vl15buf, u8 *crc_sizes); static void read_vc_remote_link_width(struct hfi1_devdata *dd, u8 *remote_tx_rate, u16 *link_widths); static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits, u8 *flag_bits, u16 *link_widths); static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id, u8 *device_rev); static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx); static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx, u8 *tx_polarity_inversion, u8 *rx_polarity_inversion, u8 *max_rate); static void handle_sdma_eng_err(struct hfi1_devdata *dd, unsigned int context, u64 err_status); static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg); static void handle_dcc_err(struct hfi1_devdata *dd, unsigned int context, u64 err_status); static void handle_lcb_err(struct hfi1_devdata *dd, unsigned int context, u64 err_status); static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg); static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg); static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg); static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg); static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg); static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg); static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg); static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg); static void set_partition_keys(struct hfi1_pportdata *ppd); static const char *link_state_name(u32 state); static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state); static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data, u64 *out_data); static int read_idle_sma(struct hfi1_devdata *dd, u64 *data); static int thermal_init(struct hfi1_devdata *dd); static void update_statusp(struct hfi1_pportdata *ppd, u32 state); static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, int msecs); static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, int msecs); static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); static void log_physical_state(struct hfi1_pportdata *ppd, u32 state); static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, int msecs); static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd, int msecs); static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc); static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr); static void handle_temp_err(struct hfi1_devdata *dd); static void 
dc_shutdown(struct hfi1_devdata *dd); static void dc_start(struct hfi1_devdata *dd); static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp, unsigned int *np); static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms); static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index); static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width); /* * Error interrupt table entry. This is used as input to the interrupt * "clear down" routine used for all second tier error interrupt register. * Second tier interrupt registers have a single bit representing them * in the top-level CceIntStatus. */ struct err_reg_info { u32 status; /* status CSR offset */ u32 clear; /* clear CSR offset */ u32 mask; /* mask CSR offset */ void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg); const char *desc; }; #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START) #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START) #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START) /* * Helpers for building HFI and DC error interrupt table entries. Different * helpers are needed because of inconsistent register names. */ #define EE(reg, handler, desc) \ { reg##_STATUS, reg##_CLEAR, reg##_MASK, \ handler, desc } #define DC_EE1(reg, handler, desc) \ { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc } #define DC_EE2(reg, handler, desc) \ { reg##_FLG, reg##_CLR, reg##_EN, handler, desc } /* * Table of the "misc" grouping of error interrupts. Each entry refers to * another register containing more information. */ static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = { /* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"), /* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"), /* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"), /* 3*/ { 0, 0, 0, NULL }, /* reserved */ /* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"), /* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"), /* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"), /* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr") /* the rest are reserved */ }; /* * Index into the Various section of the interrupt sources * corresponding to the Critical Temperature interrupt. */ #define TCRIT_INT_SOURCE 4 /* * SDMA error interrupt entry - refers to another register containing more * information. */ static const struct err_reg_info sdma_eng_err = EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr"); static const struct err_reg_info various_err[NUM_VARIOUS] = { /* 0*/ { 0, 0, 0, NULL }, /* PbcInt */ /* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */ /* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"), /* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"), /* 4*/ { 0, 0, 0, NULL }, /* TCritInt */ /* rest are reserved */ }; /* * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG * register can not be derived from the MTU value because 10K is not * a power of 2. Therefore, we need a constant. Everything else can * be calculated. */ #define DCC_CFG_PORT_MTU_CAP_10240 7 /* * Table of the DC grouping of error interrupts. Each entry refers to * another register containing more information. 
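 * Broadly, the common clear-down handling uses the status/clear/mask
 * offsets in each entry to read and acknowledge the per-block error
 * register and to hand its value to the entry's handler.  Two DC
 * helpers are needed above because the DCC block names its CSRs
 * _FLG/_FLG_CLR/_FLG_EN while the LCB and 8051 blocks use
 * _FLG/_CLR/_EN.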
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;

	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;

	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;

	/*
	 * flags
	 */
	u8 flags;

	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name,\
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)

/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
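 *
 * Offsets below dd->base2_start map directly into the first region
 * (kregbase1); offsets at or above base2_start map into the second
 * region (kregbase2), relative to base2_start.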
 */
static inline void __iomem *hfi1_addr_from_offset(
		const struct hfi1_devdata *dd,
		u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}

/**
 * read_csr - read CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no mapping
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}

/**
 * write_csr - write CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 * @value: value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}

/**
 * get_csr_addr - return the iomem address for offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}

/* Dev Access */
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}

static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}

static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}

static u64 dc_access_lcb_cntr(const struct cntr_entry *entry,
void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; u32 csr = entry->csr; int ret = 0; if (vl != CNTR_INVALID_VL) return 0; if (mode == CNTR_MODE_R) ret = read_lcb_csr(dd, csr, &data); else if (mode == CNTR_MODE_W) ret = write_lcb_csr(dd, csr, data); if (ret) { if (!(dd->flags & HFI1_SHUTDOWN)) dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr); return 0; } hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode); return data; } /* Port Access */ static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; if (vl != CNTR_INVALID_VL) return 0; return read_write_csr(ppd->dd, entry->csr, mode, data); } static u64 port_access_u64_csr(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; u64 val; u64 csr = entry->csr; if (entry->flags & CNTR_VL) { if (vl == CNTR_INVALID_VL) return 0; csr += 8 * vl; } else { if (vl != CNTR_INVALID_VL) return 0; } val = read_write_csr(ppd->dd, csr, mode, data); return val; } /* Software defined */ static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode, u64 data) { u64 ret; if (mode == CNTR_MODE_R) { ret = *cntr; } else if (mode == CNTR_MODE_W) { *cntr = data; ret = data; } else { dd_dev_err(dd, "Invalid cntr sw access mode"); return 0; } hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode); return ret; } static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; if (vl != CNTR_INVALID_VL) return 0; return read_write_sw(ppd->dd, &ppd->link_downed, mode, data); } static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; if (vl != CNTR_INVALID_VL) return 0; return read_write_sw(ppd->dd, &ppd->link_up, mode, data); } static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; if (vl != CNTR_INVALID_VL) return 0; return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data); } static u64 access_sw_xmit_discards(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; u64 zero = 0; u64 *counter; if (vl == CNTR_INVALID_VL) counter = &ppd->port_xmit_discards; else if (vl >= 0 && vl < C_VL_COUNT) counter = &ppd->port_xmit_discards_vl[vl]; else counter = &zero; return read_write_sw(ppd->dd, counter, mode, data); } static u64 access_xmit_constraint_errs(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; if (vl != CNTR_INVALID_VL) return 0; return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors, mode, data); } static u64 access_rcv_constraint_errs(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; if (vl != CNTR_INVALID_VL) return 0; return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors, mode, data); } u64 get_all_cpu_total(u64 __percpu *cntr) { int cpu; u64 counter = 0; for_each_possible_cpu(cpu) counter += *per_cpu_ptr(cntr, cpu); return counter; } static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val, u64 __percpu *cntr, int vl, int mode, u64 data) { u64 ret = 0; if (vl != CNTR_INVALID_VL) return 0; if (mode == CNTR_MODE_R) { ret = 
get_all_cpu_total(cntr) - *z_val; } else if (mode == CNTR_MODE_W) { /* A write can only zero the counter */ if (data == 0) *z_val = get_all_cpu_total(cntr); else dd_dev_err(dd, "Per CPU cntrs can only be zeroed"); } else { dd_dev_err(dd, "Invalid cntr sw cpu access mode"); return 0; } return ret; } static u64 access_sw_cpu_intr(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl, mode, data); } static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl, mode, data); } static u64 access_sw_pio_wait(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; return dd->verbs_dev.n_piowait; } static u64 access_sw_pio_drain(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->verbs_dev.n_piodrain; } static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; return dd->ctx0_seq_drop; } static u64 access_sw_vtx_wait(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; return dd->verbs_dev.n_txwait; } static u64 access_sw_kmem_wait(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; return dd->verbs_dev.n_kmem_wait; } static u64 access_sw_send_schedule(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl, mode, data); } /* Software counters for the error status bits within MISC_ERR_STATUS */ static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[12]; } static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[11]; } static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[10]; } static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[9]; } static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[8]; } static u64 access_misc_efuse_read_bad_addr_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[7]; } static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[6]; } static u64 
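/*
 * As with the neighboring MISC accessors, this returns one element of
 * dd->misc_err_status_cnt[]; the array index matches the bit number of
 * the error within MISC_ERR_STATUS.
 */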
access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[5]; } static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[4]; } static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[3]; } static u64 access_misc_csr_write_bad_addr_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[2]; } static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[1]; } static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->misc_err_status_cnt[0]; } /* * Software counter for the aggregate of * individual CceErrStatus counters */ static u64 access_sw_cce_err_status_aggregated_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_cce_err_status_aggregate; } /* * Software counters corresponding to each of the * error status bits within CceErrStatus */ static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[40]; } static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[39]; } static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[38]; } static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[37]; } static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[36]; } static u64 access_cce_rxdma_conv_fifo_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[35]; } static u64 access_cce_rcpl_async_fifo_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[34]; } static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[33]; } static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry, void *context, 
int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[32]; } static u64 access_la_triggered_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[31]; } static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[30]; } static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[29]; } static u64 access_pcic_transmit_back_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[28]; } static u64 access_pcic_transmit_front_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[27]; } static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[26]; } static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[25]; } static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[24]; } static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[23]; } static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[22]; } static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[21]; } static u64 access_pcic_n_post_dat_q_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[20]; } static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[19]; } static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[18]; } static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[17]; } static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = 
(struct hfi1_devdata *)context; return dd->cce_err_status_cnt[16]; } static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[15]; } static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[14]; } static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[13]; } static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[12]; } static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[11]; } static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[10]; } static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[9]; } static u64 access_cce_cli2_async_fifo_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[8]; } static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[7]; } static u64 access_cce_cli0_async_fifo_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[6]; } static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[5]; } static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[4]; } static u64 access_cce_trgt_async_fifo_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[3]; } static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[2]; } static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[1]; } static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct 
hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->cce_err_status_cnt[0]; } /* * Software counters corresponding to each of the * error status bits within RcvErrStatus */ static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[63]; } static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[62]; } static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[61]; } static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[60]; } static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[59]; } static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[58]; } static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[57]; } static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[56]; } static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[55]; } static u64 access_rx_dma_data_fifo_rd_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[54]; } static u64 access_rx_dma_data_fifo_rd_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[53]; } static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[52]; } static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[51]; } static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[50]; } static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[49]; } static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry, void 
*context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[48]; } static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[47]; } static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[46]; } static u64 access_rx_hq_intr_csr_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[45]; } static u64 access_rx_lookup_csr_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[44]; } static u64 access_rx_lookup_rcv_array_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[43]; } static u64 access_rx_lookup_rcv_array_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[42]; } static u64 access_rx_lookup_des_part2_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[41]; } static u64 access_rx_lookup_des_part1_unc_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[40]; } static u64 access_rx_lookup_des_part1_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[39]; } static u64 access_rx_rbuf_next_free_buf_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[38]; } static u64 access_rx_rbuf_next_free_buf_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[37]; } static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[36]; } static u64 access_rx_rbuf_fl_initdone_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[35]; } static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[34]; } static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[33]; } static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry 
*entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[32]; } static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[31]; } static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[30]; } static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[29]; } static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[28]; } static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[27]; } static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[26]; } static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[25]; } static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[24]; } static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[23]; } static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[22]; } static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[21]; } static u64 access_rx_rbuf_block_list_read_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[20]; } static u64 access_rx_rbuf_block_list_read_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[19]; } static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[18]; } static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[17]; } static u64 
access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[16]; } static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[15]; } static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[14]; } static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[13]; } static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[12]; } static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[11]; } static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[10]; } static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[9]; } static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[8]; } static u64 access_rx_rcv_qp_map_table_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[7]; } static u64 access_rx_rcv_qp_map_table_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[6]; } static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[5]; } static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[4]; } static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[3]; } static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[2]; } static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[1]; } static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry, void 
*context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->rcv_err_status_cnt[0]; } /* * Software counters corresponding to each of the * error status bits within SendPioErrStatus */ static u64 access_pio_pec_sop_head_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[35]; } static u64 access_pio_pcc_sop_head_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[34]; } static u64 access_pio_last_returned_cnt_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[33]; } static u64 access_pio_current_free_cnt_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[32]; } static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[31]; } static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[30]; } static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[29]; } static u64 access_pio_ppmc_bqc_mem_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[28]; } static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[27]; } static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[26]; } static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[25]; } static u64 access_pio_block_qw_count_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[24]; } static u64 access_pio_write_qw_valid_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[23]; } static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[22]; } static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct 
hfi1_devdata *)context; return dd->send_pio_err_status_cnt[21]; } static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[20]; } static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[19]; } static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[18]; } static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[17]; } static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[16]; } static u64 access_pio_credit_ret_fifo_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[15]; } static u64 access_pio_v1_len_mem_bank1_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[14]; } static u64 access_pio_v1_len_mem_bank0_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[13]; } static u64 access_pio_v1_len_mem_bank1_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[12]; } static u64 access_pio_v1_len_mem_bank0_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[11]; } static u64 access_pio_sm_pkt_reset_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[10]; } static u64 access_pio_pkt_evict_fifo_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[9]; } static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[8]; } static u64 access_pio_sbrdctl_crrel_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[7]; } static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[6]; } static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry, 
void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[5]; } static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[4]; } static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[3]; } static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[2]; } static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[1]; } static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_pio_err_status_cnt[0]; } /* * Software counters corresponding to each of the * error status bits within SendDmaErrStatus */ static u64 access_sdma_pcie_req_tracking_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_dma_err_status_cnt[3]; } static u64 access_sdma_pcie_req_tracking_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_dma_err_status_cnt[2]; } static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_dma_err_status_cnt[1]; } static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_dma_err_status_cnt[0]; } /* * Software counters corresponding to each of the * error status bits within SendEgressErrStatus */ static u64 access_tx_read_pio_memory_csr_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[63]; } static u64 access_tx_read_sdma_memory_csr_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[62]; } static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[61]; } static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[60]; } static u64 access_tx_read_sdma_memory_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[59]; } static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry 
*entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[58]; } static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[57]; } static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[56]; } static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[55]; } static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[54]; } static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[53]; } static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[52]; } static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[51]; } static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[50]; } static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[49]; } static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[48]; } static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[47]; } static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[46]; } static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[45]; } static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[44]; } static u64 access_tx_read_sdma_memory_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[43]; } 
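/*
 * Illustrative sketch (not part of the driver): every access_*_err_cnt()
 * helper in this stretch follows one shape -- cast the opaque context pointer
 * back to the per-device hfi1_devdata and return a single fixed slot of a
 * software error-counter array, where the slot number follows the error
 * status bit numbering described in the section comments.  Under that
 * assumption, one such accessor could be generated by a macro like the
 * hypothetical DEF_EGRESS_ERR_CNT() below; the driver instead spells each
 * accessor out explicitly so every counter keeps its own symbol name.
 */
#if 0	/* hypothetical example only, never compiled */
#define DEF_EGRESS_ERR_CNT(name, idx)					\
static u64 access_##name##_err_cnt(const struct cntr_entry *entry,	\
				   void *context, int vl, int mode,	\
				   u64 data)				\
{									\
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;	\
	return dd->send_egress_err_status_cnt[idx];			\
}
#endif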
static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[42]; } static u64 access_tx_credit_return_partiy_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[41]; } static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[40]; } static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[39]; } static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[38]; } static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[37]; } static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[36]; } static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[35]; } static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[34]; } static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[33]; } static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[32]; } static u64 access_tx_sdma15_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[31]; } static u64 access_tx_sdma14_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[30]; } static u64 access_tx_sdma13_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[29]; } static u64 access_tx_sdma12_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[28]; } static u64 
access_tx_sdma11_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[27]; } static u64 access_tx_sdma10_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[26]; } static u64 access_tx_sdma9_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[25]; } static u64 access_tx_sdma8_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[24]; } static u64 access_tx_sdma7_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[23]; } static u64 access_tx_sdma6_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[22]; } static u64 access_tx_sdma5_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[21]; } static u64 access_tx_sdma4_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[20]; } static u64 access_tx_sdma3_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[19]; } static u64 access_tx_sdma2_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[18]; } static u64 access_tx_sdma1_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[17]; } static u64 access_tx_sdma0_disallowed_packet_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[16]; } static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[15]; } static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[14]; } static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[13]; } static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry, void 
*context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[12]; } static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[11]; } static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[10]; } static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[9]; } static u64 access_tx_sdma_launch_intf_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[8]; } static u64 access_tx_pio_launch_intf_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[7]; } static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[6]; } static u64 access_tx_incorrect_link_state_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[5]; } static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[4]; } static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[3]; } static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[2]; } static u64 access_tx_pkt_integrity_mem_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[1]; } static u64 access_tx_pkt_integrity_mem_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_egress_err_status_cnt[0]; } /* * Software counters corresponding to each of the * error status bits within SendErrStatus */ static u64 access_send_csr_write_bad_addr_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_err_status_cnt[2]; } static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_err_status_cnt[1]; } static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct 
hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->send_err_status_cnt[0]; } /* * Software counters corresponding to each of the * error status bits within SendCtxtErrStatus */ static u64 access_pio_write_out_of_bounds_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_ctxt_err_status_cnt[4]; } static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_ctxt_err_status_cnt[3]; } static u64 access_pio_write_crosses_boundary_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_ctxt_err_status_cnt[2]; } static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_ctxt_err_status_cnt[1]; } static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_ctxt_err_status_cnt[0]; } /* * Software counters corresponding to each of the * error status bits within SendDmaEngErrStatus */ static u64 access_sdma_header_request_fifo_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[23]; } static u64 access_sdma_header_storage_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[22]; } static u64 access_sdma_packet_tracking_cor_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[21]; } static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[20]; } static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[19]; } static u64 access_sdma_header_request_fifo_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[18]; } static u64 access_sdma_header_storage_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[17]; } static u64 access_sdma_packet_tracking_unc_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[16]; } static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[15]; } static u64 
access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[14]; } static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[13]; } static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[12]; } static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[11]; } static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[10]; } static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[9]; } static u64 access_sdma_packet_desc_overflow_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[8]; } static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[7]; } static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[6]; } static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[5]; } static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[4]; } static u64 access_sdma_tail_out_of_bounds_err_cnt( const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[3]; } static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[2]; } static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[1]; } static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; return dd->sw_send_dma_eng_err_status_cnt[0]; } static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata 
*)context;
	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}

#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,	\
				void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
			      ppd->ibport_data.rvp.cntr, vl, \
			      mode, data); \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);

#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
			     void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
\
	if (vl != CNTR_INVALID_VL) \
		return 0; \
\
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
			     mode, data); \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
def_access_ibp_counter(rc_crwaits);

static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT, CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT, CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs, RCV_TID_FLOW_GEN_MISMATCH_CNT, CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL, CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs, RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt, CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT, CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT, CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT, CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT, CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT, CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt, CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt, CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH, access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] =
DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT, CNTR_SYNTH), [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT, CNTR_SYNTH), [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT, CNTR_SYNTH), [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts, DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH), [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts, DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT, CNTR_SYNTH), [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr, DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH), [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT, CNTR_SYNTH), [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT, CNTR_SYNTH), [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT, CNTR_SYNTH), [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT, CNTR_SYNTH), [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT, CNTR_SYNTH), [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT, CNTR_SYNTH), [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT, CNTR_SYNTH), [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT, CNTR_SYNTH | CNTR_VL), [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT, CNTR_SYNTH | CNTR_VL), [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH), [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT, CNTR_SYNTH | CNTR_VL), [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH), [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT, CNTR_SYNTH | CNTR_VL), [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT, CNTR_SYNTH), [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT, CNTR_SYNTH | CNTR_VL), [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT, CNTR_SYNTH), [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT, CNTR_SYNTH | CNTR_VL), [C_DC_TOTAL_CRC] = DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR, CNTR_SYNTH), [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0, CNTR_SYNTH), [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1, CNTR_SYNTH), [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2, CNTR_SYNTH), [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3, CNTR_SYNTH), [C_DC_CRC_MULT_LN] = DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN, CNTR_SYNTH), [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT, CNTR_SYNTH), [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT, CNTR_SYNTH), [C_DC_SEQ_CRC_CNT] = DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT, CNTR_SYNTH), [C_DC_ESC0_ONLY_CNT] = DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT, CNTR_SYNTH), [C_DC_ESC0_PLUS1_CNT] = DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT, CNTR_SYNTH), [C_DC_ESC0_PLUS2_CNT] = DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT, CNTR_SYNTH), [C_DC_REINIT_FROM_PEER_CNT] = DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, CNTR_SYNTH), [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT, CNTR_SYNTH), [C_DC_MISC_FLG_CNT] = DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT, CNTR_SYNTH), [C_DC_PRF_GOOD_LTP_CNT] = DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH), [C_DC_PRF_ACCEPTED_LTP_CNT] = 
DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT, CNTR_SYNTH), [C_DC_PRF_RX_FLIT_CNT] = DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH), [C_DC_PRF_TX_FLIT_CNT] = DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH), [C_DC_PRF_CLK_CNTR] = DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH), [C_DC_PG_DBG_FLIT_CRDTS_CNT] = DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH), [C_DC_PG_STS_PAUSE_COMPLETE_CNT] = DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT, CNTR_SYNTH), [C_DC_PG_STS_TX_SBE_CNT] = DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH), [C_DC_PG_STS_TX_MBE_CNT] = DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT, CNTR_SYNTH), [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL, access_sw_cpu_intr), [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL, access_sw_cpu_rcv_limit), [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL, access_sw_ctx0_seq_drop), [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL, access_sw_vtx_wait), [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL, access_sw_pio_wait), [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL, access_sw_pio_drain), [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL, access_sw_kmem_wait), [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL, hfi1_access_sw_tid_wait), [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL, access_sw_send_schedule), [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn", SEND_DMA_DESC_FETCHED_CNT, 0, CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, dev_access_u32_csr), [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0, CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, access_sde_int_cnt), [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0, CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, access_sde_err_cnt), [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0, CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, access_sde_idle_int_cnt), [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0, CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, access_sde_progress_int_cnt), /* MISC_ERR_STATUS */ [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0, CNTR_NORMAL, access_misc_pll_lock_fail_err_cnt), [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0, CNTR_NORMAL, access_misc_mbist_fail_err_cnt), [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0, CNTR_NORMAL, access_misc_invalid_eep_cmd_err_cnt), [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0, CNTR_NORMAL, access_misc_efuse_done_parity_err_cnt), [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0, CNTR_NORMAL, access_misc_efuse_write_err_cnt), [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0, 0, CNTR_NORMAL, access_misc_efuse_read_bad_addr_err_cnt), [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0, CNTR_NORMAL, access_misc_efuse_csr_parity_err_cnt), [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0, CNTR_NORMAL, access_misc_fw_auth_failed_err_cnt), [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0, CNTR_NORMAL, access_misc_key_mismatch_err_cnt), [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0, CNTR_NORMAL, access_misc_sbus_write_failed_err_cnt), [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0, CNTR_NORMAL, access_misc_csr_write_bad_addr_err_cnt), [C_MISC_CSR_READ_BAD_ADDR_ERR] = 
CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0, CNTR_NORMAL, access_misc_csr_read_bad_addr_err_cnt), [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0, CNTR_NORMAL, access_misc_csr_parity_err_cnt), /* CceErrStatus */ [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0, CNTR_NORMAL, access_sw_cce_err_status_aggregated_cnt), [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0, CNTR_NORMAL, access_cce_msix_csr_parity_err_cnt), [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0, CNTR_NORMAL, access_cce_int_map_unc_err_cnt), [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0, CNTR_NORMAL, access_cce_int_map_cor_err_cnt), [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0, CNTR_NORMAL, access_cce_msix_table_unc_err_cnt), [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0, CNTR_NORMAL, access_cce_msix_table_cor_err_cnt), [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0, 0, CNTR_NORMAL, access_cce_rxdma_conv_fifo_parity_err_cnt), [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0, 0, CNTR_NORMAL, access_cce_rcpl_async_fifo_parity_err_cnt), [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0, CNTR_NORMAL, access_cce_seg_write_bad_addr_err_cnt), [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0, CNTR_NORMAL, access_cce_seg_read_bad_addr_err_cnt), [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0, CNTR_NORMAL, access_la_triggered_cnt), [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0, CNTR_NORMAL, access_cce_trgt_cpl_timeout_err_cnt), [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0, CNTR_NORMAL, access_pcic_receive_parity_err_cnt), [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0, CNTR_NORMAL, access_pcic_transmit_back_parity_err_cnt), [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0, 0, CNTR_NORMAL, access_pcic_transmit_front_parity_err_cnt), [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0, CNTR_NORMAL, access_pcic_cpl_dat_q_unc_err_cnt), [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0, CNTR_NORMAL, access_pcic_cpl_hd_q_unc_err_cnt), [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0, CNTR_NORMAL, access_pcic_post_dat_q_unc_err_cnt), [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0, CNTR_NORMAL, access_pcic_post_hd_q_unc_err_cnt), [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0, CNTR_NORMAL, access_pcic_retry_sot_mem_unc_err_cnt), [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0, CNTR_NORMAL, access_pcic_retry_mem_unc_err), [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0, CNTR_NORMAL, access_pcic_n_post_dat_q_parity_err_cnt), [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0, CNTR_NORMAL, access_pcic_n_post_h_q_parity_err_cnt), [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0, CNTR_NORMAL, access_pcic_cpl_dat_q_cor_err_cnt), [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0, CNTR_NORMAL, access_pcic_cpl_hd_q_cor_err_cnt), [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0, CNTR_NORMAL, access_pcic_post_dat_q_cor_err_cnt), [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0, CNTR_NORMAL, access_pcic_post_hd_q_cor_err_cnt), [C_PCIC_RETRY_SOT_MEM_COR_ERR] = 
CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0, CNTR_NORMAL, access_pcic_retry_sot_mem_cor_err_cnt), [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0, CNTR_NORMAL, access_pcic_retry_mem_cor_err_cnt), [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM( "CceCli1AsyncFifoDbgParityError", 0, 0, CNTR_NORMAL, access_cce_cli1_async_fifo_dbg_parity_err_cnt), [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM( "CceCli1AsyncFifoRxdmaParityError", 0, 0, CNTR_NORMAL, access_cce_cli1_async_fifo_rxdma_parity_err_cnt ), [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM( "CceCli1AsyncFifoSdmaHdParityErr", 0, 0, CNTR_NORMAL, access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt), [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM( "CceCli1AsyncFifoPioCrdtParityErr", 0, 0, CNTR_NORMAL, access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt), [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0, 0, CNTR_NORMAL, access_cce_cli2_async_fifo_parity_err_cnt), [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0, CNTR_NORMAL, access_cce_csr_cfg_bus_parity_err_cnt), [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0, 0, CNTR_NORMAL, access_cce_cli0_async_fifo_parity_err_cnt), [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0, CNTR_NORMAL, access_cce_rspd_data_parity_err_cnt), [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0, CNTR_NORMAL, access_cce_trgt_access_err_cnt), [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0, 0, CNTR_NORMAL, access_cce_trgt_async_fifo_parity_err_cnt), [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0, CNTR_NORMAL, access_cce_csr_write_bad_addr_err_cnt), [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0, CNTR_NORMAL, access_cce_csr_read_bad_addr_err_cnt), [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0, CNTR_NORMAL, access_ccs_csr_parity_err_cnt), /* RcvErrStatus */ [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0, CNTR_NORMAL, access_rx_csr_parity_err_cnt), [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0, CNTR_NORMAL, access_rx_csr_write_bad_addr_err_cnt), [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0, CNTR_NORMAL, access_rx_csr_read_bad_addr_err_cnt), [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0, CNTR_NORMAL, access_rx_dma_csr_unc_err_cnt), [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0, CNTR_NORMAL, access_rx_dma_dq_fsm_encoding_err_cnt), [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0, CNTR_NORMAL, access_rx_dma_eq_fsm_encoding_err_cnt), [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0, CNTR_NORMAL, access_rx_dma_csr_parity_err_cnt), [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_data_cor_err_cnt), [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_data_unc_err_cnt), [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0, CNTR_NORMAL, access_rx_dma_data_fifo_rd_cor_err_cnt), [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0, CNTR_NORMAL, access_rx_dma_data_fifo_rd_unc_err_cnt), [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0, CNTR_NORMAL, access_rx_dma_hdr_fifo_rd_cor_err_cnt), [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0, CNTR_NORMAL, access_rx_dma_hdr_fifo_rd_unc_err_cnt), 
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_desc_part2_cor_err_cnt), [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_desc_part2_unc_err_cnt), [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_desc_part1_cor_err_cnt), [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_desc_part1_unc_err_cnt), [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0, CNTR_NORMAL, access_rx_hq_intr_fsm_err_cnt), [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0, CNTR_NORMAL, access_rx_hq_intr_csr_parity_err_cnt), [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0, CNTR_NORMAL, access_rx_lookup_csr_parity_err_cnt), [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0, CNTR_NORMAL, access_rx_lookup_rcv_array_cor_err_cnt), [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0, CNTR_NORMAL, access_rx_lookup_rcv_array_unc_err_cnt), [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0, 0, CNTR_NORMAL, access_rx_lookup_des_part2_parity_err_cnt), [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0, 0, CNTR_NORMAL, access_rx_lookup_des_part1_unc_cor_err_cnt), [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0, CNTR_NORMAL, access_rx_lookup_des_part1_unc_err_cnt), [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_next_free_buf_cor_err_cnt), [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_next_free_buf_unc_err_cnt), [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM( "RxRbufFlInitWrAddrParityErr", 0, 0, CNTR_NORMAL, access_rbuf_fl_init_wr_addr_parity_err_cnt), [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_fl_initdone_parity_err_cnt), [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_fl_write_addr_parity_err_cnt), [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_fl_rd_addr_parity_err_cnt), [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_empty_err_cnt), [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_full_err_cnt), [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0, CNTR_NORMAL, access_rbuf_bad_lookup_err_cnt), [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0, CNTR_NORMAL, access_rbuf_ctx_id_parity_err_cnt), [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0, CNTR_NORMAL, access_rbuf_csr_qeopdw_parity_err_cnt), [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM( "RxRbufCsrQNumOfPktParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt), [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM( "RxRbufCsrQTlPtrParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt), [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt), [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_csr_q_vld_bit_parity_err_cnt), [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = 
CNTR_ELEM("RxRbufCsrQNextBufParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_csr_q_next_buf_parity_err_cnt), [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt), [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM( "RxRbufCsrQHeadBufNumParityErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt), [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_block_list_read_cor_err_cnt), [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_block_list_read_unc_err_cnt), [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_lookup_des_cor_err_cnt), [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_lookup_des_unc_err_cnt), [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM( "RxRbufLookupDesRegUncCorErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt), [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_lookup_des_reg_unc_err_cnt), [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_free_list_cor_err_cnt), [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0, CNTR_NORMAL, access_rx_rbuf_free_list_unc_err_cnt), [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0, CNTR_NORMAL, access_rx_rcv_fsm_encoding_err_cnt), [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0, CNTR_NORMAL, access_rx_dma_flag_cor_err_cnt), [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0, CNTR_NORMAL, access_rx_dma_flag_unc_err_cnt), [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0, CNTR_NORMAL, access_rx_dc_sop_eop_parity_err_cnt), [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0, CNTR_NORMAL, access_rx_rcv_csr_parity_err_cnt), [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0, CNTR_NORMAL, access_rx_rcv_qp_map_table_cor_err_cnt), [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0, CNTR_NORMAL, access_rx_rcv_qp_map_table_unc_err_cnt), [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0, CNTR_NORMAL, access_rx_rcv_data_cor_err_cnt), [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0, CNTR_NORMAL, access_rx_rcv_data_unc_err_cnt), [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0, CNTR_NORMAL, access_rx_rcv_hdr_cor_err_cnt), [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0, CNTR_NORMAL, access_rx_rcv_hdr_unc_err_cnt), [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0, CNTR_NORMAL, access_rx_dc_intf_parity_err_cnt), [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0, CNTR_NORMAL, access_rx_dma_csr_cor_err_cnt), /* SendPioErrStatus */ [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0, CNTR_NORMAL, access_pio_pec_sop_head_parity_err_cnt), [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0, CNTR_NORMAL, access_pio_pcc_sop_head_parity_err_cnt), [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr", 0, 0, CNTR_NORMAL, access_pio_last_returned_cnt_parity_err_cnt), [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0, 0, CNTR_NORMAL, access_pio_current_free_cnt_parity_err_cnt), [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio 
Reserved 31", 0, 0, CNTR_NORMAL, access_pio_reserved_31_err_cnt), [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0, CNTR_NORMAL, access_pio_reserved_30_err_cnt), [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0, CNTR_NORMAL, access_pio_ppmc_sop_len_err_cnt), [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0, CNTR_NORMAL, access_pio_ppmc_bqc_mem_parity_err_cnt), [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0, CNTR_NORMAL, access_pio_vl_fifo_parity_err_cnt), [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0, CNTR_NORMAL, access_pio_vlf_sop_parity_err_cnt), [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0, CNTR_NORMAL, access_pio_vlf_v1_len_parity_err_cnt), [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0, CNTR_NORMAL, access_pio_block_qw_count_parity_err_cnt), [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0, CNTR_NORMAL, access_pio_write_qw_valid_parity_err_cnt), [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0, CNTR_NORMAL, access_pio_state_machine_err_cnt), [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0, CNTR_NORMAL, access_pio_write_data_parity_err_cnt), [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0, CNTR_NORMAL, access_pio_host_addr_mem_cor_err_cnt), [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0, CNTR_NORMAL, access_pio_host_addr_mem_unc_err_cnt), [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0, CNTR_NORMAL, access_pio_pkt_evict_sm_or_arb_sm_err_cnt), [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0, CNTR_NORMAL, access_pio_init_sm_in_err_cnt), [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0, CNTR_NORMAL, access_pio_ppmc_pbl_fifo_err_cnt), [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0, 0, CNTR_NORMAL, access_pio_credit_ret_fifo_parity_err_cnt), [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0, CNTR_NORMAL, access_pio_v1_len_mem_bank1_cor_err_cnt), [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0, CNTR_NORMAL, access_pio_v1_len_mem_bank0_cor_err_cnt), [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0, CNTR_NORMAL, access_pio_v1_len_mem_bank1_unc_err_cnt), [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0, CNTR_NORMAL, access_pio_v1_len_mem_bank0_unc_err_cnt), [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0, CNTR_NORMAL, access_pio_sm_pkt_reset_parity_err_cnt), [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0, CNTR_NORMAL, access_pio_pkt_evict_fifo_parity_err_cnt), [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM( "PioSbrdctrlCrrelFifoParityErr", 0, 0, CNTR_NORMAL, access_pio_sbrdctrl_crrel_fifo_parity_err_cnt), [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0, CNTR_NORMAL, access_pio_sbrdctl_crrel_parity_err_cnt), [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0, CNTR_NORMAL, access_pio_pec_fifo_parity_err_cnt), [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0, CNTR_NORMAL, access_pio_pcc_fifo_parity_err_cnt), [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0, CNTR_NORMAL, access_pio_sb_mem_fifo1_err_cnt), [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0, CNTR_NORMAL, access_pio_sb_mem_fifo0_err_cnt), 
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0, CNTR_NORMAL, access_pio_csr_parity_err_cnt), [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0, CNTR_NORMAL, access_pio_write_addr_parity_err_cnt), [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0, CNTR_NORMAL, access_pio_write_bad_ctxt_err_cnt), /* SendDmaErrStatus */ [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0, 0, CNTR_NORMAL, access_sdma_pcie_req_tracking_cor_err_cnt), [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0, 0, CNTR_NORMAL, access_sdma_pcie_req_tracking_unc_err_cnt), [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0, CNTR_NORMAL, access_sdma_csr_parity_err_cnt), [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0, CNTR_NORMAL, access_sdma_rpy_tag_err_cnt), /* SendEgressErrStatus */ [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0, CNTR_NORMAL, access_tx_read_pio_memory_csr_unc_err_cnt), [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0, 0, CNTR_NORMAL, access_tx_read_sdma_memory_csr_err_cnt), [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0, CNTR_NORMAL, access_tx_egress_fifo_cor_err_cnt), [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0, CNTR_NORMAL, access_tx_read_pio_memory_cor_err_cnt), [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0, CNTR_NORMAL, access_tx_read_sdma_memory_cor_err_cnt), [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0, CNTR_NORMAL, access_tx_sb_hdr_cor_err_cnt), [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0, CNTR_NORMAL, access_tx_credit_overrun_err_cnt), [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo8_cor_err_cnt), [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo7_cor_err_cnt), [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo6_cor_err_cnt), [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo5_cor_err_cnt), [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo4_cor_err_cnt), [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo3_cor_err_cnt), [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo2_cor_err_cnt), [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo1_cor_err_cnt), [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo0_cor_err_cnt), [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0, CNTR_NORMAL, access_tx_credit_return_vl_err_cnt), [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0, CNTR_NORMAL, access_tx_hcrc_insertion_err_cnt), [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0, CNTR_NORMAL, access_tx_egress_fifo_unc_err_cnt), [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0, CNTR_NORMAL, access_tx_read_pio_memory_unc_err_cnt), [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0, CNTR_NORMAL, access_tx_read_sdma_memory_unc_err_cnt), [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0, CNTR_NORMAL, access_tx_sb_hdr_unc_err_cnt), [C_TX_CREDIT_RETURN_PARITY_ERR] = 
CNTR_ELEM("TxCreditReturnParityErr", 0, 0, CNTR_NORMAL, access_tx_credit_return_partiy_err_cnt), [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo8_unc_or_parity_err_cnt), [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo7_unc_or_parity_err_cnt), [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo6_unc_or_parity_err_cnt), [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo5_unc_or_parity_err_cnt), [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo4_unc_or_parity_err_cnt), [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo3_unc_or_parity_err_cnt), [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo2_unc_or_parity_err_cnt), [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo1_unc_or_parity_err_cnt), [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_fifo0_unc_or_parity_err_cnt), [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma15_disallowed_packet_err_cnt), [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma14_disallowed_packet_err_cnt), [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma13_disallowed_packet_err_cnt), [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma12_disallowed_packet_err_cnt), [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma11_disallowed_packet_err_cnt), [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma10_disallowed_packet_err_cnt), [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma9_disallowed_packet_err_cnt), [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma8_disallowed_packet_err_cnt), [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma7_disallowed_packet_err_cnt), [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma6_disallowed_packet_err_cnt), [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma5_disallowed_packet_err_cnt), [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma4_disallowed_packet_err_cnt), [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma3_disallowed_packet_err_cnt), [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma2_disallowed_packet_err_cnt), [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma1_disallowed_packet_err_cnt), 
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr", 0, 0, CNTR_NORMAL, access_tx_sdma0_disallowed_packet_err_cnt), [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0, CNTR_NORMAL, access_tx_config_parity_err_cnt), [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0, CNTR_NORMAL, access_tx_sbrd_ctl_csr_parity_err_cnt), [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0, CNTR_NORMAL, access_tx_launch_csr_parity_err_cnt), [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0, CNTR_NORMAL, access_tx_illegal_vl_err_cnt), [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM( "TxSbrdCtlStateMachineParityErr", 0, 0, CNTR_NORMAL, access_tx_sbrd_ctl_state_machine_parity_err_cnt), [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0, CNTR_NORMAL, access_egress_reserved_10_err_cnt), [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0, CNTR_NORMAL, access_egress_reserved_9_err_cnt), [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr", 0, 0, CNTR_NORMAL, access_tx_sdma_launch_intf_parity_err_cnt), [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0, CNTR_NORMAL, access_tx_pio_launch_intf_parity_err_cnt), [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0, CNTR_NORMAL, access_egress_reserved_6_err_cnt), [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0, CNTR_NORMAL, access_tx_incorrect_link_state_err_cnt), [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0, CNTR_NORMAL, access_tx_linkdown_err_cnt), [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM( "EgressFifoUnderrunOrParityErr", 0, 0, CNTR_NORMAL, access_tx_egress_fifi_underrun_or_parity_err_cnt), [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0, CNTR_NORMAL, access_egress_reserved_2_err_cnt), [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0, CNTR_NORMAL, access_tx_pkt_integrity_mem_unc_err_cnt), [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0, CNTR_NORMAL, access_tx_pkt_integrity_mem_cor_err_cnt), /* SendErrStatus */ [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0, CNTR_NORMAL, access_send_csr_write_bad_addr_err_cnt), [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0, CNTR_NORMAL, access_send_csr_read_bad_addr_err_cnt), [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0, CNTR_NORMAL, access_send_csr_parity_cnt), /* SendCtxtErrStatus */ [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0, CNTR_NORMAL, access_pio_write_out_of_bounds_err_cnt), [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0, CNTR_NORMAL, access_pio_write_overflow_err_cnt), [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr", 0, 0, CNTR_NORMAL, access_pio_write_crosses_boundary_err_cnt), [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0, CNTR_NORMAL, access_pio_disallowed_packet_err_cnt), [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0, CNTR_NORMAL, access_pio_inconsistent_sop_err_cnt), /* SendDmaEngErrStatus */ [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr", 0, 0, CNTR_NORMAL, access_sdma_header_request_fifo_cor_err_cnt), [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0, CNTR_NORMAL, access_sdma_header_storage_cor_err_cnt), [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0, 
CNTR_NORMAL, access_sdma_packet_tracking_cor_err_cnt), [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0, CNTR_NORMAL, access_sdma_assembly_cor_err_cnt), [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0, CNTR_NORMAL, access_sdma_desc_table_cor_err_cnt), [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr", 0, 0, CNTR_NORMAL, access_sdma_header_request_fifo_unc_err_cnt), [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0, CNTR_NORMAL, access_sdma_header_storage_unc_err_cnt), [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0, CNTR_NORMAL, access_sdma_packet_tracking_unc_err_cnt), [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0, CNTR_NORMAL, access_sdma_assembly_unc_err_cnt), [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0, CNTR_NORMAL, access_sdma_desc_table_unc_err_cnt), [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0, CNTR_NORMAL, access_sdma_timeout_err_cnt), [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0, CNTR_NORMAL, access_sdma_header_length_err_cnt), [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0, CNTR_NORMAL, access_sdma_header_address_err_cnt), [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0, CNTR_NORMAL, access_sdma_header_select_err_cnt), [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0, CNTR_NORMAL, access_sdma_reserved_9_err_cnt), [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0, CNTR_NORMAL, access_sdma_packet_desc_overflow_err_cnt), [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0, CNTR_NORMAL, access_sdma_length_mismatch_err_cnt), [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0, CNTR_NORMAL, access_sdma_halt_err_cnt), [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0, CNTR_NORMAL, access_sdma_mem_read_err_cnt), [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0, CNTR_NORMAL, access_sdma_first_desc_err_cnt), [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0, CNTR_NORMAL, access_sdma_tail_out_of_bounds_err_cnt), [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0, CNTR_NORMAL, access_sdma_too_long_err_cnt), [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0, CNTR_NORMAL, access_sdma_gen_mismatch_err_cnt), [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0, CNTR_NORMAL, access_sdma_wrong_dw_err_cnt), }; static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = { [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT, CNTR_NORMAL), [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT, CNTR_NORMAL), [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT, CNTR_NORMAL), [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT, CNTR_NORMAL), [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT, CNTR_NORMAL), [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT, CNTR_NORMAL), [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT, CNTR_NORMAL), [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL), [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL), [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH), [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT, CNTR_SYNTH | CNTR_VL), [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT, CNTR_SYNTH | 
CNTR_VL), [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT, CNTR_SYNTH | CNTR_VL), [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL), [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL), [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT, access_sw_link_dn_cnt), [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT, access_sw_link_up_cnt), [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL, access_sw_unknown_frame_cnt), [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT, access_sw_xmit_discards), [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0, CNTR_SYNTH | CNTR_32BIT | CNTR_VL, access_sw_xmit_discards), [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH, access_xmit_constraint_errs), [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH, access_rcv_constraint_errs), [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts), [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends), [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks), [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks), [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts), [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops), [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait), [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak), [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq), [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq), [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned), [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks), [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits), [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL, access_sw_cpu_rc_acks), [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL, access_sw_cpu_rc_qacks), [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL, access_sw_cpu_rc_delayed_comp), [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1), [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3), [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5), [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7), [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9), [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11), [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13), [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15), [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17), [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19), [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21), [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23), [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25), [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27), [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29), [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31), [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33), [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35), [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37), [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39), [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41), [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43), [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45), [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47), [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49), [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51), [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53), [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55), 
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57), [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59), [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61), [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63), [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65), [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67), [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69), [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71), [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73), [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75), [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77), [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79), [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81), [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83), [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85), [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87), [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89), [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91), [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93), [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95), [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97), [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99), [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101), [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103), [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105), [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107), [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109), [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111), [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113), [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115), [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117), [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119), [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121), [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123), [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125), [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127), [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129), [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131), [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133), [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135), [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137), [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139), [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141), [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143), [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145), [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147), [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149), [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151), [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153), [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155), [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157), [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159), }; /* ======================================================================== */ /* return true if this is chip revision revision a */ int is_ax(struct hfi1_devdata *dd) { u8 chip_rev_minor = dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT & CCE_REVISION_CHIP_REV_MINOR_MASK; return (chip_rev_minor & 0xf0) == 0; } /* return true if this is chip revision revision b */ int is_bx(struct hfi1_devdata *dd) { u8 
chip_rev_minor = dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT & CCE_REVISION_CHIP_REV_MINOR_MASK; return (chip_rev_minor & 0xF0) == 0x10; } /* return true is kernel urg disabled for rcd */ bool is_urg_masked(struct hfi1_ctxtdata *rcd) { u64 mask; u32 is = IS_RCVURGENT_START + rcd->ctxt; u8 bit = is % 64; mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64))); return !(mask & BIT_ULL(bit)); } /* * Append string s to buffer buf. Arguments curp and len are the current * position and remaining length, respectively. * * return 0 on success, 1 on out of room */ static int append_str(char *buf, char **curp, int *lenp, const char *s) { char *p = *curp; int len = *lenp; int result = 0; /* success */ char c; /* add a comma, if first in the buffer */ if (p != buf) { if (len == 0) { result = 1; /* out of room */ goto done; } *p++ = ','; len--; } /* copy the string */ while ((c = *s++) != 0) { if (len == 0) { result = 1; /* out of room */ goto done; } *p++ = c; len--; } done: /* write return values */ *curp = p; *lenp = len; return result; } /* * Using the given flag table, print a comma separated string into * the buffer. End in '*' if the buffer is too short. */ static char *flag_string(char *buf, int buf_len, u64 flags, struct flag_table *table, int table_size) { char extra[32]; char *p = buf; int len = buf_len; int no_room = 0; int i; /* make sure there is at least 2 so we can form "*" */ if (len < 2) return ""; len--; /* leave room for a nul */ for (i = 0; i < table_size; i++) { if (flags & table[i].flag) { no_room = append_str(buf, &p, &len, table[i].str); if (no_room) break; flags &= ~table[i].flag; } } /* any undocumented bits left? */ if (!no_room && flags) { snprintf(extra, sizeof(extra), "bits 0x%llx", flags); no_room = append_str(buf, &p, &len, extra); } /* add * if ran out of room */ if (no_room) { /* may need to back up to add space for a '*' */ if (len == 0) --p; *p++ = '*'; } /* add final nul - space already allocated above */ *p = 0; return buf; } /* first 8 CCE error interrupt source names */ static const char * const cce_misc_names[] = { "CceErrInt", /* 0 */ "RxeErrInt", /* 1 */ "MiscErrInt", /* 2 */ "Reserved3", /* 3 */ "PioErrInt", /* 4 */ "SDmaErrInt", /* 5 */ "EgressErrInt", /* 6 */ "TxeErrInt" /* 7 */ }; /* * Return the miscellaneous error interrupt name. */ static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source) { if (source < ARRAY_SIZE(cce_misc_names)) strncpy(buf, cce_misc_names[source], bsize); else snprintf(buf, bsize, "Reserved%u", source + IS_GENERAL_ERR_START); return buf; } /* * Return the SDMA engine error interrupt name. */ static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source) { snprintf(buf, bsize, "SDmaEngErrInt%u", source); return buf; } /* * Return the send context error interrupt name. */ static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source) { snprintf(buf, bsize, "SendCtxtErrInt%u", source); return buf; } static const char * const various_names[] = { "PbcInt", "GpioAssertInt", "Qsfp1Int", "Qsfp2Int", "TCritInt" }; /* * Return the various interrupt name. */ static char *is_various_name(char *buf, size_t bsize, unsigned int source) { if (source < ARRAY_SIZE(various_names)) strncpy(buf, various_names[source], bsize); else snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START); return buf; } /* * Return the DC interrupt name. 
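 *
 * A usage sketch (illustrative only; the local buffer name is assumed,
 * outputs follow from dc_int_names[] below):
 *
 *   char name[64];
 *
 *   is_dc_name(name, sizeof(name), 1);    ->  "dc_lcb_int"
 *   is_dc_name(name, sizeof(name), 7);    ->  out of range, "DCInt7"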
*/ static char *is_dc_name(char *buf, size_t bsize, unsigned int source) { static const char * const dc_int_names[] = { "common", "lcb", "8051", "lbm" /* local block merge */ }; if (source < ARRAY_SIZE(dc_int_names)) snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]); else snprintf(buf, bsize, "DCInt%u", source); return buf; } static const char * const sdma_int_names[] = { "SDmaInt", "SdmaIdleInt", "SdmaProgressInt", }; /* * Return the SDMA engine interrupt name. */ static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source) { /* what interrupt */ unsigned int what = source / TXE_NUM_SDMA_ENGINES; /* which engine */ unsigned int which = source % TXE_NUM_SDMA_ENGINES; if (likely(what < 3)) snprintf(buf, bsize, "%s%u", sdma_int_names[what], which); else snprintf(buf, bsize, "Invalid SDMA interrupt %u", source); return buf; } /* * Return the receive available interrupt name. */ static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source) { snprintf(buf, bsize, "RcvAvailInt%u", source); return buf; } /* * Return the receive urgent interrupt name. */ static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source) { snprintf(buf, bsize, "RcvUrgentInt%u", source); return buf; } /* * Return the send credit interrupt name. */ static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source) { snprintf(buf, bsize, "SendCreditInt%u", source); return buf; } /* * Return the reserved interrupt name. */ static char *is_reserved_name(char *buf, size_t bsize, unsigned int source) { snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START); return buf; } static char *cce_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags)); } static char *rxe_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags)); } static char *misc_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, misc_err_status_flags, ARRAY_SIZE(misc_err_status_flags)); } static char *pio_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags)); } static char *sdma_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, sdma_err_status_flags, ARRAY_SIZE(sdma_err_status_flags)); } static char *egress_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags)); } static char *egress_err_info_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags)); } static char *send_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, send_err_status_flags, ARRAY_SIZE(send_err_status_flags)); } static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { char buf[96]; int i = 0; /* * For most these errors, there is nothing that can be done except * report or record it. 
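 *
 * The decoded string below comes from cce_err_status_string(), i.e.
 * flag_string(): known bits print as a comma-separated list of names,
 * any undocumented bits are appended as "bits 0x<mask>", and a trailing
 * '*' marks a truncated buffer.  For example (the flag name shown is
 * illustrative, not taken from the flag table):
 *
 *   reg = <one known bit> | BIT_ULL(63)
 *     ->  "SomeKnownErr,bits 0x8000000000000000"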
*/ dd_dev_info(dd, "CCE Error: %s\n", cce_err_status_string(buf, sizeof(buf), reg)); if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) && is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { /* this error requires a manual drop into SPC freeze mode */ /* then a fix up */ start_freeze_handling(dd->pport, FREEZE_SELF); } for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) { incr_cntr64(&dd->cce_err_status_cnt[i]); /* maintain a counter over all cce_err_status errors */ incr_cntr64(&dd->sw_cce_err_status_aggregate); } } } /* * Check counters for receive errors that do not have an interrupt * associated with them. */ #define RCVERR_CHECK_TIME 10 static void update_rcverr_timer(struct timer_list *t) { struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer); struct hfi1_pportdata *ppd = dd->pport; u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL); if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) { dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__); set_link_down_reason( ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN); queue_work(ppd->link_wq, &ppd->link_bounce_work); } dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); } static int init_rcverr(struct hfi1_devdata *dd) { timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0); /* Assume the hardware counter has been reset */ dd->rcv_ovfl_cnt = 0; return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); } static void free_rcverr(struct hfi1_devdata *dd) { if (dd->rcverr_timer.function) del_timer_sync(&dd->rcverr_timer); } static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { char buf[96]; int i = 0; dd_dev_info(dd, "Receive Error: %s\n", rxe_err_status_string(buf, sizeof(buf), reg)); if (reg & ALL_RXE_FREEZE_ERR) { int flags = 0; /* * Freeze mode recovery is disabled for the errors * in RXE_FREEZE_ABORT_MASK */ if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK)) flags = FREEZE_ABORT; start_freeze_handling(dd->pport, flags); } for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) incr_cntr64(&dd->rcv_err_status_cnt[i]); } } static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { char buf[96]; int i = 0; dd_dev_info(dd, "Misc Error: %s", misc_err_status_string(buf, sizeof(buf), reg)); for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) incr_cntr64(&dd->misc_err_status_cnt[i]); } } static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { char buf[96]; int i = 0; dd_dev_info(dd, "PIO Error: %s\n", pio_err_status_string(buf, sizeof(buf), reg)); if (reg & ALL_PIO_FREEZE_ERR) start_freeze_handling(dd->pport, 0); for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) incr_cntr64(&dd->send_pio_err_status_cnt[i]); } } static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { char buf[96]; int i = 0; dd_dev_info(dd, "SDMA Error: %s\n", sdma_err_status_string(buf, sizeof(buf), reg)); if (reg & ALL_SDMA_FREEZE_ERR) start_freeze_handling(dd->pport, 0); for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) incr_cntr64(&dd->send_dma_err_status_cnt[i]); } } static inline void __count_port_discards(struct hfi1_pportdata *ppd) { incr_cntr64(&ppd->port_xmit_discards); } static void count_port_inactive(struct hfi1_devdata *dd) { __count_port_discards(dd->pport); } /* * We have 
had a "disallowed packet" error during egress. Determine the
 * integrity check which failed, and update the relevant error counter, etc.
 *
 * Note that the SEND_EGRESS_ERR_INFO register has only a single
 * bit of state per integrity check, and so we can miss the reason for an
 * egress error if more than one packet fails the same integrity check
 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd, int vl)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
	char buf[96];

	/* clear down all observed info as quickly as possible after read */
	write_csr(dd, SEND_EGRESS_ERR_INFO, info);

	dd_dev_info(dd,
		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		    info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* Eventually add other counters for each bit */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Count all applicable bits as individual errors and
		 * attribute them to the packet that triggered this handler.
		 * This may not be completely accurate due to limitations
		 * on the available hardware error information. There is
		 * a single information register and any number of error
		 * packets may have occurred and contributed to it before
		 * this routine is called. This means that:
		 * a) If multiple packets with the same error occur before
		 *    this routine is called, earlier packets are missed.
		 *    There is only a single bit for each error type.
		 * b) Errors may not be attributed to the correct VL.
		 *    The driver is attributing all bits in the info register
		 *    to the packet that triggered this call, but bits
		 *    could be an accumulation of different packets with
		 *    different VLs.
		 * c) A single error packet may have multiple counts attached
		 *    to it. There is no way for the driver to know if
		 *    multiple bits set in the info register are due to a
		 *    single packet or multiple packets. The driver assumes
		 *    multiple packets.
		 */
		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl[C_VL_15]);
		}
	}
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'port inactive' error?
 */
static inline int port_inactive_err(u64 posn)
{
	return (posn >= SEES(TX_LINKDOWN) &&
		posn <= SEES(TX_INCORRECT_LINK_STATE));
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'disallowed packet' error?
 */
static inline int disallowed_pkt_err(int posn)
{
	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
}

/*
 * Input value is a bit position of one of the SDMA engine disallowed
 * packet errors. Return which engine. Use of this must be guarded by
 * disallowed_pkt_err().
 */
static inline int disallowed_pkt_engine(int posn)
{
	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
}

/*
 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
 * be done.
 */
static int engine_to_vl(struct hfi1_devdata *dd, int engine)
{
	struct sdma_vl_map *m;
	int vl;

	/* range check */
	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
		return -1;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	vl = m->engine_to_vl[engine];
	rcu_read_unlock();

	return vl;
}

/*
 * Translate the send context (software index) into a VL.
Return -1 if the * translation cannot be done. */ static int sc_to_vl(struct hfi1_devdata *dd, int sw_index) { struct send_context_info *sci; struct send_context *sc; int i; sci = &dd->send_contexts[sw_index]; /* there is no information for user (PSM) and ack contexts */ if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15)) return -1; sc = sci->sc; if (!sc) return -1; if (dd->vld[15].sc == sc) return 15; for (i = 0; i < num_vls; i++) if (dd->vld[i].sc == sc) return i; return -1; } static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { u64 reg_copy = reg, handled = 0; char buf[96]; int i = 0; if (reg & ALL_TXE_EGRESS_FREEZE_ERR) start_freeze_handling(dd->pport, 0); else if (is_ax(dd) && (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) start_freeze_handling(dd->pport, 0); while (reg_copy) { int posn = fls64(reg_copy); /* fls64() returns a 1-based offset, we want it zero based */ int shift = posn - 1; u64 mask = 1ULL << shift; if (port_inactive_err(shift)) { count_port_inactive(dd); handled |= mask; } else if (disallowed_pkt_err(shift)) { int vl = engine_to_vl(dd, disallowed_pkt_engine(shift)); handle_send_egress_err_info(dd, vl); handled |= mask; } reg_copy &= ~mask; } reg &= ~handled; if (reg) dd_dev_info(dd, "Egress Error: %s\n", egress_err_status_string(buf, sizeof(buf), reg)); for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) incr_cntr64(&dd->send_egress_err_status_cnt[i]); } } static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { char buf[96]; int i = 0; dd_dev_info(dd, "Send Error: %s\n", send_err_status_string(buf, sizeof(buf), reg)); for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) incr_cntr64(&dd->send_err_status_cnt[i]); } } /* * The maximum number of times the error clear down will loop before * blocking a repeating error. This value is arbitrary. */ #define MAX_CLEAR_COUNT 20 /* * Clear and handle an error register. All error interrupts are funneled * through here to have a central location to correctly handle single- * or multi-shot errors. * * For non per-context registers, call this routine with a context value * of 0 so the per-context offset is zero. * * If the handler loops too many times, assume that something is wrong * and can't be fixed, so mask the error bits. */ static void interrupt_clear_down(struct hfi1_devdata *dd, u32 context, const struct err_reg_info *eri) { u64 reg; u32 count; /* read in a loop until no more errors are seen */ count = 0; while (1) { reg = read_kctxt_csr(dd, context, eri->status); if (reg == 0) break; write_kctxt_csr(dd, context, eri->clear, reg); if (likely(eri->handler)) eri->handler(dd, context, reg); count++; if (count > MAX_CLEAR_COUNT) { u64 mask; dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n", eri->desc, reg); /* * Read-modify-write so any other masked bits * remain masked. */ mask = read_kctxt_csr(dd, context, eri->mask); mask &= ~reg; write_kctxt_csr(dd, context, eri->mask, mask); break; } } } /* * CCE block "misc" interrupt. Source is < 16. 
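 *
 * Note on the clear-down path (see interrupt_clear_down() above): the
 * status register is read and cleared in a loop, and if the same bits
 * keep re-asserting for more than MAX_CLEAR_COUNT passes they are masked
 * off with a read-modify-write of the mask CSR, logging a line of the
 * form (example only):
 *
 *   "Repeating <desc> bits 0x<reg> - masking"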
*/ static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source) { const struct err_reg_info *eri = &misc_errs[source]; if (eri->handler) { interrupt_clear_down(dd, 0, eri); } else { dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n", source); } } static char *send_context_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags)); } /* * Send context error interrupt. Source (hw_context) is < 160. * * All send context errors cause the send context to halt. The normal * clear-down mechanism cannot be used because we cannot clear the * error bits until several other long-running items are done first. * This is OK because with the context halted, nothing else is going * to happen on it anyway. */ static void is_sendctxt_err_int(struct hfi1_devdata *dd, unsigned int hw_context) { struct send_context_info *sci; struct send_context *sc; char flags[96]; u64 status; u32 sw_index; int i = 0; unsigned long irq_flags; sw_index = dd->hw_to_sw[hw_context]; if (sw_index >= dd->num_send_contexts) { dd_dev_err(dd, "out of range sw index %u for send context %u\n", sw_index, hw_context); return; } sci = &dd->send_contexts[sw_index]; spin_lock_irqsave(&dd->sc_lock, irq_flags); sc = sci->sc; if (!sc) { dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, sw_index, hw_context); spin_unlock_irqrestore(&dd->sc_lock, irq_flags); return; } /* tell the software that a halt has begun */ sc_stop(sc, SCF_HALTED); status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS); dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context, send_context_err_status_string(flags, sizeof(flags), status)); if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK) handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index)); /* * Automatically restart halted kernel contexts out of interrupt * context. User contexts must ask the driver to restart the context. */ if (sc->type != SC_USER) queue_work(dd->pport->hfi1_wq, &sc->halt_work); spin_unlock_irqrestore(&dd->sc_lock, irq_flags); /* * Update the counters for the corresponding status bits. * Note that these particular counters are aggregated over all * 160 contexts. */ for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) { if (status & (1ull << i)) incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]); } } static void handle_sdma_eng_err(struct hfi1_devdata *dd, unsigned int source, u64 status) { struct sdma_engine *sde; int i = 0; sde = &dd->per_sdma[source]; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n", sde->this_idx, source, (unsigned long long)status); #endif sde->err_cnt++; sdma_engine_error(sde, status); /* * Update the counters for the corresponding status bits. * Note that these particular counters are aggregated over * all 16 DMA engines. */ for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) { if (status & (1ull << i)) incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]); } } /* * CCE block SDMA error interrupt. Source is < 16. 
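 *
 * Flow sketch (sdma_eng_err is the per-engine err_reg_info defined
 * elsewhere in this file; routing its handler to handle_sdma_eng_err()
 * above is assumed here, not shown):
 *
 *   is_sdma_eng_err_int(dd, 5)
 *     -> interrupt_clear_down(dd, 5, &sdma_eng_err)
 *        -> read/clear engine 5's error status in a loop, invoking the
 *           handler for each non-zero status value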
*/ static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source) { #ifdef CONFIG_SDMA_VERBOSITY struct sdma_engine *sde = &dd->per_sdma[source]; dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx, source); sdma_dumpstate(sde); #endif interrupt_clear_down(dd, source, &sdma_eng_err); } /* * CCE block "various" interrupt. Source is < 8. */ static void is_various_int(struct hfi1_devdata *dd, unsigned int source) { const struct err_reg_info *eri = &various_err[source]; /* * TCritInt cannot go through interrupt_clear_down() * because it is not a second tier interrupt. The handler * should be called directly. */ if (source == TCRIT_INT_SOURCE) handle_temp_err(dd); else if (eri->handler) interrupt_clear_down(dd, 0, eri); else dd_dev_info(dd, "%s: Unimplemented/reserved interrupt %d\n", __func__, source); } static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) { /* src_ctx is always zero */ struct hfi1_pportdata *ppd = dd->pport; unsigned long flags; u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); if (reg & QSFP_HFI0_MODPRST_N) { if (!qsfp_mod_present(ppd)) { dd_dev_info(dd, "%s: QSFP module removed\n", __func__); ppd->driver_link_ready = 0; /* * Cable removed, reset all our information about the * cache and cable capabilities */ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); /* * We don't set cache_refresh_required here as we expect * an interrupt when a cable is inserted */ ppd->qsfp_info.cache_valid = 0; ppd->qsfp_info.reset_needed = 0; ppd->qsfp_info.limiting_active = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); /* Invert the ModPresent pin now to detect plug-in */ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, qsfp_int_mgmt); if ((ppd->offline_disabled_reason > HFI1_ODR_MASK( OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) || (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))) ppd->offline_disabled_reason = HFI1_ODR_MASK( OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED); if (ppd->host_link_state == HLS_DN_POLL) { /* * The link is still in POLL. This means * that the normal link down processing * will not happen. We have to do it here * before turning the DC off. */ queue_work(ppd->link_wq, &ppd->link_down_work); } } else { dd_dev_info(dd, "%s: QSFP module inserted\n", __func__); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.cache_valid = 0; ppd->qsfp_info.cache_refresh_required = 1; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); /* * Stop inversion of ModPresent pin to detect * removal of the cable */ qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N; write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, qsfp_int_mgmt); ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); } } if (reg & QSFP_HFI0_INT_N) { dd_dev_info(dd, "%s: Interrupt received from QSFP module\n", __func__); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.check_interrupt_flags = 1; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); } /* Schedule the QSFP work only if there is a cable attached. 
*/ if (qsfp_mod_present(ppd)) queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work); } static int request_host_lcb_access(struct hfi1_devdata *dd) { int ret; ret = do_8051_command(dd, HCMD_MISC, (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT, NULL); if (ret != HCMD_SUCCESS && !(dd->flags & HFI1_SHUTDOWN)) { dd_dev_err(dd, "%s: command failed with error %d\n", __func__, ret); } return ret == HCMD_SUCCESS ? 0 : -EBUSY; } static int request_8051_lcb_access(struct hfi1_devdata *dd) { int ret; ret = do_8051_command(dd, HCMD_MISC, (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT, NULL); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "%s: command failed with error %d\n", __func__, ret); } return ret == HCMD_SUCCESS ? 0 : -EBUSY; } /* * Set the LCB selector - allow host access. The DCC selector always * points to the host. */ static inline void set_host_lcb_access(struct hfi1_devdata *dd) { write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK); } /* * Clear the LCB selector - allow 8051 access. The DCC selector always * points to the host. */ static inline void set_8051_lcb_access(struct hfi1_devdata *dd) { write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK); } /* * Acquire LCB access from the 8051. If the host already has access, * just increment a counter. Otherwise, inform the 8051 that the * host is taking access. * * Returns: * 0 on success * -EBUSY if the 8051 has control and cannot be disturbed * -errno if unable to acquire access from the 8051 */ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) { struct hfi1_pportdata *ppd = dd->pport; int ret = 0; /* * Use the host link state lock so the operation of this routine * { link state check, selector change, count increment } can occur * as a unit against a link state change. Otherwise there is a * race between the state change and the count increment. */ if (sleep_ok) { mutex_lock(&ppd->hls_lock); } else { while (!mutex_trylock(&ppd->hls_lock)) udelay(1); } /* this access is valid only when the link is up */ if (ppd->host_link_state & HLS_DOWN) { dd_dev_info(dd, "%s: link state %s not up\n", __func__, link_state_name(ppd->host_link_state)); ret = -EBUSY; goto done; } if (dd->lcb_access_count == 0) { ret = request_host_lcb_access(dd); if (ret) { if (!(dd->flags & HFI1_SHUTDOWN)) dd_dev_err(dd, "%s: unable to acquire LCB access, err %d\n", __func__, ret); goto done; } set_host_lcb_access(dd); } dd->lcb_access_count++; done: mutex_unlock(&ppd->hls_lock); return ret; } /* * Release LCB access by decrementing the use count. If the count is moving * from 1 to 0, inform 8051 that it has control back. * * Returns: * 0 on success * -errno if unable to release access to the 8051 */ int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) { int ret = 0; /* * Use the host link state lock because the acquire needed it. * Here, we only need to keep { selector change, count decrement } * as a unit. */ if (sleep_ok) { mutex_lock(&dd->pport->hls_lock); } else { while (!mutex_trylock(&dd->pport->hls_lock)) udelay(1); } if (dd->lcb_access_count == 0) { dd_dev_err(dd, "%s: LCB access count is zero. 
Skipping.\n", __func__); goto done; } if (dd->lcb_access_count == 1) { set_8051_lcb_access(dd); ret = request_8051_lcb_access(dd); if (ret) { dd_dev_err(dd, "%s: unable to release LCB access, err %d\n", __func__, ret); /* restore host access if the grant didn't work */ set_host_lcb_access(dd); goto done; } } dd->lcb_access_count--; done: mutex_unlock(&dd->pport->hls_lock); return ret; } /* * Initialize LCB access variables and state. Called during driver load, * after most of the initialization is finished. * * The DC default is LCB access on for the host. The driver defaults to * leaving access to the 8051. Assign access now - this constrains the call * to this routine to be after all LCB set-up is done. In particular, after * hf1_init_dd() -> set_up_interrupts() -> clear_all_interrupts() */ static void init_lcb_access(struct hfi1_devdata *dd) { dd->lcb_access_count = 0; } /* * Write a response back to a 8051 request. */ static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data) { write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT); } /* * Handle host requests from the 8051. */ static void handle_8051_request(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 reg; u16 data = 0; u8 type; reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1); if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0) return; /* no request */ /* zero out COMPLETED so the response is seen */ write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0); /* extract request details */ type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT) & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK; data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT) & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK; switch (type) { case HREQ_LOAD_CONFIG: case HREQ_SAVE_CONFIG: case HREQ_READ_CONFIG: case HREQ_SET_TX_EQ_ABS: case HREQ_SET_TX_EQ_REL: case HREQ_ENABLE: dd_dev_info(dd, "8051 request: request 0x%x not supported\n", type); hreq_response(dd, HREQ_NOT_SUPPORTED, 0); break; case HREQ_LCB_RESET: /* Put the LCB, RX FPE and TX FPE into reset */ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET); /* Make sure the write completed */ (void)read_csr(dd, DCC_CFG_RESET); /* Hold the reset long enough to take effect */ udelay(1); /* Take the LCB, RX FPE and TX FPE out of reset */ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); hreq_response(dd, HREQ_SUCCESS, 0); break; case HREQ_CONFIG_DONE: hreq_response(dd, HREQ_SUCCESS, 0); break; case HREQ_INTERFACE_TEST: hreq_response(dd, HREQ_SUCCESS, data); break; default: dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type); hreq_response(dd, HREQ_NOT_SUPPORTED, 0); break; } } /* * Set up allocation unit vaulue. */ void set_up_vau(struct hfi1_devdata *dd, u8 vau) { u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); /* do not modify other values in the register */ reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); } /* * Set up initial VL15 credits of the remote. Assumes the rest of * the CM credit registers are zero from a previous global or credit reset. * Shared limit for VL15 will always be 0. 
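 *
 * Worked example (value illustrative): with vl15buf == 64 the writes
 * below leave
 *
 *   SEND_CM_GLOBAL_CREDIT.TOTAL_CREDIT_LIMIT = 64
 *   SEND_CM_GLOBAL_CREDIT.SHARED_LIMIT       = 0
 *   SEND_CM_CREDIT_VL15.DEDICATED_LIMIT_VL   = 64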
*/ void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) { u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); /* set initial values for total and shared credit limit */ reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); /* * Set total limit to be equal to VL15 credits. * Leave shared limit at 0. */ reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); } /* * Zero all credit details from the previous connection and * reset the CM manager's internal counters. */ void reset_link_credits(struct hfi1_devdata *dd) { int i; /* remove all previous VL credit limits */ for (i = 0; i < TXE_NUM_DATA_VL; i++) write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); write_csr(dd, SEND_CM_CREDIT_VL15, 0); write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); /* reset the CM block */ pio_send_control(dd, PSC_CM_RESET); /* reset cached value */ dd->vl15buf_cached = 0; } /* convert a vCU to a CU */ static u32 vcu_to_cu(u8 vcu) { return 1 << vcu; } /* convert a CU to a vCU */ static u8 cu_to_vcu(u32 cu) { return ilog2(cu); } /* convert a vAU to an AU */ static u32 vau_to_au(u8 vau) { return 8 * (1 << vau); } static void set_linkup_defaults(struct hfi1_pportdata *ppd) { ppd->sm_trap_qp = 0x0; ppd->sa_qp = 0x1; } /* * Graceful LCB shutdown. This leaves the LCB FIFOs in reset. */ static void lcb_shutdown(struct hfi1_devdata *dd, int abort) { u64 reg; /* clear lcb run: LCB_CFG_RUN.EN = 0 */ write_csr(dd, DC_LCB_CFG_RUN, 0); /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT); /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */ dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN); reg = read_csr(dd, DCC_CFG_RESET); write_csr(dd, DCC_CFG_RESET, reg | DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE); (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */ if (!abort) { udelay(1); /* must hold for the longer of 16cclks or 20ns */ write_csr(dd, DCC_CFG_RESET, reg); write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); } } /* * This routine should be called after the link has been transitioned to * OFFLINE (OFFLINE state has the side effect of putting the SerDes into * reset). * * The expectation is that the caller of this routine would have taken * care of properly transitioning the link into the correct state. * NOTE: the caller needs to acquire the dd->dc8051_lock lock * before calling this function. */ static void _dc_shutdown(struct hfi1_devdata *dd) { lockdep_assert_held(&dd->dc8051_lock); if (dd->dc_shutdown) return; dd->dc_shutdown = 1; /* Shutdown the LCB */ lcb_shutdown(dd, 1); /* * Going to OFFLINE would have causes the 8051 to put the * SerDes into reset already. Just need to shut down the 8051, * itself. */ write_csr(dd, DC_DC8051_CFG_RST, 0x1); } static void dc_shutdown(struct hfi1_devdata *dd) { mutex_lock(&dd->dc8051_lock); _dc_shutdown(dd); mutex_unlock(&dd->dc8051_lock); } /* * Calling this after the DC has been brought out of reset should not * do any damage. * NOTE: the caller needs to acquire the dd->dc8051_lock lock * before calling this function. 
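 *
 * Callers are expected to go through the locked wrapper, e.g.:
 *
 *   mutex_lock(&dd->dc8051_lock);
 *   _dc_start(dd);
 *   mutex_unlock(&dd->dc8051_lock);
 *
 * which is exactly what dc_start() below does.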
*/ static void _dc_start(struct hfi1_devdata *dd) { lockdep_assert_held(&dd->dc8051_lock); if (!dd->dc_shutdown) return; /* Take the 8051 out of reset */ write_csr(dd, DC_DC8051_CFG_RST, 0ull); /* Wait until 8051 is ready */ if (wait_fm_ready(dd, TIMEOUT_8051_START)) dd_dev_err(dd, "%s: timeout starting 8051 firmware\n", __func__); /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); /* lcb_shutdown() with abort=1 does not restore these */ write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); dd->dc_shutdown = 0; } static void dc_start(struct hfi1_devdata *dd) { mutex_lock(&dd->dc8051_lock); _dc_start(dd); mutex_unlock(&dd->dc8051_lock); } /* * These LCB adjustments are for the Aurora SerDes core in the FPGA. */ static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd) { u64 rx_radr, tx_radr; u32 version; if (dd->icode != ICODE_FPGA_EMULATION) return; /* * These LCB defaults on emulator _s are good, nothing to do here: * LCB_CFG_TX_FIFOS_RADR * LCB_CFG_RX_FIFOS_RADR * LCB_CFG_LN_DCLK * LCB_CFG_IGNORE_LOST_RCLK */ if (is_emulator_s(dd)) return; /* else this is _p */ version = emulator_rev(dd); if (!is_ax(dd)) version = 0x2d; /* all B0 use 0x2d or higher settings */ if (version <= 0x12) { /* release 0x12 and below */ /* * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa */ rx_radr = 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; /* * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default) * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6 */ tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; } else if (version <= 0x18) { /* release 0x13 up to 0x18 */ /* LCB_CFG_RX_FIFOS_RADR = 0x988 */ rx_radr = 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; } else if (version == 0x19) { /* release 0x19 */ /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */ rx_radr = 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; } else if (version == 0x1a) { /* release 0x1a */ /* LCB_CFG_RX_FIFOS_RADR = 0x988 */ rx_radr = 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull); } else { /* release 0x1b and higher */ /* LCB_CFG_RX_FIFOS_RADR = 0x877 */ rx_radr = 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; } write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr); /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */ write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK); write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr); } /* * Handle a SMA idle message * * This is a work-queue function outside of the interrupt. 
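 *
 * Decoding sketch (matches the switch in the handler below; the handler
 * switches on the low byte of msg):
 *
 *   switch (msg & 0xff) {
 *   case SMA_IDLE_ARM:      INIT/ARMED only: mark neighbor_normal
 *   case SMA_IDLE_ACTIVE:   ARMED + optimize enabled: move link to Active
 *   default:                unexpected message, log and ignore
 *   }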
*/ void handle_sma_message(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, sma_message_work); struct hfi1_devdata *dd = ppd->dd; u64 msg; int ret; /* * msg is bytes 1-4 of the 40-bit idle message - the command code * is stripped off */ ret = read_idle_sma(dd, &msg); if (ret) return; dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg); /* * React to the SMA message. Byte[1] (0 for us) is the command. */ switch (msg & 0xff) { case SMA_IDLE_ARM: /* * See OPAv1 table 9-14 - HFI and External Switch Ports Key * State Transitions * * Only expected in INIT or ARMED, discard otherwise. */ if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED)) ppd->neighbor_normal = 1; break; case SMA_IDLE_ACTIVE: /* * See OPAv1 table 9-14 - HFI and External Switch Ports Key * State Transitions * * Can activate the node. Discard otherwise. */ if (ppd->host_link_state == HLS_UP_ARMED && ppd->is_active_optimize_enabled) { ppd->neighbor_normal = 1; ret = set_link_state(ppd, HLS_UP_ACTIVE); if (ret) dd_dev_err( dd, "%s: received Active SMA idle message, couldn't set link to Active\n", __func__); } break; default: dd_dev_err(dd, "%s: received unexpected SMA idle message 0x%llx\n", __func__, msg); break; } } static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear) { u64 rcvctrl; unsigned long flags; spin_lock_irqsave(&dd->rcvctrl_lock, flags); rcvctrl = read_csr(dd, RCV_CTRL); rcvctrl |= add; rcvctrl &= ~clear; write_csr(dd, RCV_CTRL, rcvctrl); spin_unlock_irqrestore(&dd->rcvctrl_lock, flags); } static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add) { adjust_rcvctrl(dd, add, 0); } static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear) { adjust_rcvctrl(dd, 0, clear); } /* * Called from all interrupt handlers to start handling an SPC freeze. */ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) { struct hfi1_devdata *dd = ppd->dd; struct send_context *sc; int i; int sc_flags; if (flags & FREEZE_SELF) write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); /* enter frozen mode */ dd->flags |= HFI1_FROZEN; /* notify all SDMA engines that they are going into a freeze */ sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ? SCF_LINK_DOWN : 0); /* do halt pre-handling on all enabled send contexts */ for (i = 0; i < dd->num_send_contexts; i++) { sc = dd->send_contexts[i].sc; if (sc && (sc->flags & SCF_ENABLED)) sc_stop(sc, sc_flags); } /* Send contexts are frozen. Notify user space */ hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT); if (flags & FREEZE_ABORT) { dd_dev_err(dd, "Aborted freeze recovery. Please REBOOT system\n"); return; } /* queue non-interrupt handler */ queue_work(ppd->hfi1_wq, &ppd->freeze_work); } /* * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen, * depending on the "freeze" parameter. * * No need to return an error if it times out; our only option * is to proceed anyway.
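 *
 * Typical pairing, as used in handle_freeze() below (illustrative):
 *
 *	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
 *	wait_for_freeze_status(dd, 0);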
*/ static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze) { unsigned long timeout; u64 reg; timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT); while (1) { reg = read_csr(dd, CCE_STATUS); if (freeze) { /* waiting until all indicators are set */ if ((reg & ALL_FROZE) == ALL_FROZE) return; /* all done */ } else { /* waiting until all indicators are clear */ if ((reg & ALL_FROZE) == 0) return; /* all done */ } if (time_after(jiffies, timeout)) { dd_dev_err(dd, "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing", freeze ? "" : "un", reg & ALL_FROZE, freeze ? ALL_FROZE : 0ull); return; } usleep_range(80, 120); } } /* * Do all freeze handling for the RXE block. */ static void rxe_freeze(struct hfi1_devdata *dd) { int i; struct hfi1_ctxtdata *rcd; /* disable port */ clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); /* disable all receive contexts */ for (i = 0; i < dd->num_rcv_contexts; i++) { rcd = hfi1_rcd_get_by_index(dd, i); hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd); hfi1_rcd_put(rcd); } } /* * Unfreeze handling for the RXE block - kernel contexts only. * This will also enable the port. User contexts will do unfreeze * handling on a per-context basis as they call into the driver. * */ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) { u32 rcvmask; u16 i; struct hfi1_ctxtdata *rcd; /* enable all kernel contexts */ for (i = 0; i < dd->num_rcv_contexts; i++) { rcd = hfi1_rcd_get_by_index(dd, i); /* Ensure all non-user contexts(including vnic) are enabled */ if (!rcd || (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) { hfi1_rcd_put(rcd); continue; } rcvmask = HFI1_RCVCTRL_CTXT_ENB; /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ? HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; hfi1_rcvctrl(dd, rcvmask, rcd); hfi1_rcd_put(rcd); } /* enable port */ add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); } /* * Non-interrupt SPC freeze handling. * * This is a work-queue function outside of the triggering interrupt. */ void handle_freeze(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, freeze_work); struct hfi1_devdata *dd = ppd->dd; /* wait for freeze indicators on all affected blocks */ wait_for_freeze_status(dd, 1); /* SPC is now frozen */ /* do send PIO freeze steps */ pio_freeze(dd); /* do send DMA freeze steps */ sdma_freeze(dd); /* do send egress freeze steps - nothing to do */ /* do receive freeze steps */ rxe_freeze(dd); /* * Unfreeze the hardware - clear the freeze, wait for each * block's frozen bit to clear, then clear the frozen flag. */ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); wait_for_freeze_status(dd, 0); if (is_ax(dd)) { write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); wait_for_freeze_status(dd, 1); write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); wait_for_freeze_status(dd, 0); } /* do send PIO unfreeze steps for kernel contexts */ pio_kernel_unfreeze(dd); /* do send DMA unfreeze steps */ sdma_unfreeze(dd); /* do send egress unfreeze steps - nothing to do */ /* do receive unfreeze steps for kernel contexts */ rxe_kernel_unfreeze(dd); /* * The unfreeze procedure touches global device registers when * it disables and re-enables RXE. Mark the device unfrozen * after all that is done so other parts of the driver waiting * for the device to unfreeze don't do things out of order. 
* * The above implies that the meaning of HFI1_FROZEN flag is * "Device has gone into freeze mode and freeze mode handling * is still in progress." * * The flag will be removed when freeze mode processing has * completed. */ dd->flags &= ~HFI1_FROZEN; wake_up(&dd->event_queue); /* no longer frozen */ } /** * update_xmit_counters - update PortXmitWait/PortVlXmitWait * counters. * @ppd: info of physical Hfi port * @link_width: new link width after link up or downgrade * * Update the PortXmitWait and PortVlXmitWait counters after * a link up or downgrade event to reflect a link width change. */ static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width) { int i; u16 tx_width; u16 link_speed; tx_width = tx_link_width(link_width); link_speed = get_link_speed(ppd->link_speed_active); /* * There are C_VL_COUNT number of PortVLXmitWait counters. * Adding 1 to C_VL_COUNT to include the PortXmitWait counter. */ for (i = 0; i < C_VL_COUNT + 1; i++) get_xmit_wait_counters(ppd, tx_width, link_speed, i); } /* * Handle a link up interrupt from the 8051. * * This is a work-queue function outside of the interrupt. */ void handle_link_up(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, link_up_work); struct hfi1_devdata *dd = ppd->dd; set_link_state(ppd, HLS_UP_INIT); /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ read_ltp_rtt(dd); /* * OPA specifies that certain counters are cleared on a transition * to link up, so do that. */ clear_linkup_counters(dd); /* * And (re)set link up default values. */ set_linkup_defaults(ppd); /* * Set VL15 credits. Use cached value from verify cap interrupt. * In case of quick linkup or simulator, vl15 value will be set by * handle_linkup_change. VerifyCap interrupt handler will not be * called in those scenarios. */ if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) set_up_vl15(dd, dd->vl15buf_cached); /* enforce link speed enabled */ if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { /* oops - current speed is not enabled, bounce */ dd_dev_err(dd, "Link speed active 0x%x is outside enabled 0x%x, downing link\n", ppd->link_speed_active, ppd->link_speed_enabled); set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, OPA_LINKDOWN_REASON_SPEED_POLICY); set_link_state(ppd, HLS_DN_OFFLINE); start_link(ppd); } } /* * Several pieces of LNI information were cached for SMA in ppd. 
* Reset these on link down */ static void reset_neighbor_info(struct hfi1_pportdata *ppd) { ppd->neighbor_guid = 0; ppd->neighbor_port_number = 0; ppd->neighbor_type = 0; ppd->neighbor_fm_security = 0; } static const char * const link_down_reason_strs[] = { [OPA_LINKDOWN_REASON_NONE] = "None", [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0", [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length", [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long", [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short", [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID", [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID", [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2", [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC", [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8", [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail", [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10", [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error", [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15", [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker", [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14", [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15", [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance", [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance", [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance", [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack", [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker", [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt", [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit", [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit", [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24", [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25", [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26", [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27", [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28", [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29", [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30", [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] = "Excessive buffer overrun", [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown", [OPA_LINKDOWN_REASON_REBOOT] = "Reboot", [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown", [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce", [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy", [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy", [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected", [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] = "Local media not installed", [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed", [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config", [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] = "End to end not installed", [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy", [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy", [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy", [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management", [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled", [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient" }; /* return the neighbor link down reason string */ static const char *link_down_reason_str(u8 reason) { const char *str = NULL; if (reason < ARRAY_SIZE(link_down_reason_strs)) str = link_down_reason_strs[reason]; if (!str) str = "(invalid)"; return str; } /* * Handle a link down interrupt from the 8051. * * This is a work-queue function outside of the interrupt. 
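 *
 * handle_8051_interrupt() queues this work at most once per link down,
 * using ppd->is_link_down_queued as a guard, roughly:
 *
 *	if (xchg(&ppd->is_link_down_queued, 1) == 0)
 *		queue_work(ppd->link_wq, &ppd->link_down_work);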
*/ void handle_link_down(struct work_struct *work) { u8 lcl_reason, neigh_reason = 0; u8 link_down_reason; struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, link_down_work); int was_up; static const char ldr_str[] = "Link down reason: "; if ((ppd->host_link_state & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) && ppd->port_type == PORT_TYPE_FIXED) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED); /* Go offline first, then deal with reading/writing through 8051 */ was_up = !!(ppd->host_link_state & HLS_UP); set_link_state(ppd, HLS_DN_OFFLINE); xchg(&ppd->is_link_down_queued, 0); if (was_up) { lcl_reason = 0; /* link down reason is only valid if the link was up */ read_link_down_reason(ppd->dd, &link_down_reason); switch (link_down_reason) { case LDR_LINK_TRANSFER_ACTIVE_LOW: /* the link went down, no idle message reason */ dd_dev_info(ppd->dd, "%sUnexpected link down\n", ldr_str); break; case LDR_RECEIVED_LINKDOWN_IDLE_MSG: /* * The neighbor reason is only valid if an idle message * was received for it. */ read_planned_down_reason_code(ppd->dd, &neigh_reason); dd_dev_info(ppd->dd, "%sNeighbor link down message %d, %s\n", ldr_str, neigh_reason, link_down_reason_str(neigh_reason)); break; case LDR_RECEIVED_HOST_OFFLINE_REQ: dd_dev_info(ppd->dd, "%sHost requested link to go offline\n", ldr_str); break; default: dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n", ldr_str, link_down_reason); break; } /* * If no reason, assume peer-initiated but missed * LinkGoingDown idle flits. */ if (neigh_reason == 0) lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN; } else { /* went down while polling or going up */ lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT; } set_link_down_reason(ppd, lcl_reason, neigh_reason, 0); /* inform the SMA when the link transitions from up to down */ if (was_up && ppd->local_link_down_reason.sma == 0 && ppd->neigh_link_down_reason.sma == 0) { ppd->local_link_down_reason.sma = ppd->local_link_down_reason.latest; ppd->neigh_link_down_reason.sma = ppd->neigh_link_down_reason.latest; } reset_neighbor_info(ppd); /* disable the port */ clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); /* * If there is no cable attached, turn the DC off. Otherwise, * start the link bring up. */ if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) dc_shutdown(ppd->dd); else start_link(ppd); } void handle_link_bounce(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, link_bounce_work); /* * Only do something if the link is currently up. */ if (ppd->host_link_state & HLS_UP) { set_link_state(ppd, HLS_DN_OFFLINE); start_link(ppd); } else { dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", __func__, link_state_name(ppd->host_link_state)); } } /* * Mask conversion: Capability exchange to Port LTP. The capability * exchange has an implicit 16b CRC that is mandatory. 
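 *
 * For example, a capability mask of CAP_CRC_14B | CAP_CRC_48B converts to
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48,
 * because the 16b mode is always included.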
*/ static int cap_to_port_ltp(int cap) { int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */ if (cap & CAP_CRC_14B) port_ltp |= PORT_LTP_CRC_MODE_14; if (cap & CAP_CRC_48B) port_ltp |= PORT_LTP_CRC_MODE_48; if (cap & CAP_CRC_12B_16B_PER_LANE) port_ltp |= PORT_LTP_CRC_MODE_PER_LANE; return port_ltp; } /* * Convert an OPA Port LTP mask to capability mask */ int port_ltp_to_cap(int port_ltp) { int cap_mask = 0; if (port_ltp & PORT_LTP_CRC_MODE_14) cap_mask |= CAP_CRC_14B; if (port_ltp & PORT_LTP_CRC_MODE_48) cap_mask |= CAP_CRC_48B; if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE) cap_mask |= CAP_CRC_12B_16B_PER_LANE; return cap_mask; } /* * Convert a single DC LCB CRC mode to an OPA Port LTP mask. */ static int lcb_to_port_ltp(int lcb_crc) { int port_ltp = 0; if (lcb_crc == LCB_CRC_12B_16B_PER_LANE) port_ltp = PORT_LTP_CRC_MODE_PER_LANE; else if (lcb_crc == LCB_CRC_48B) port_ltp = PORT_LTP_CRC_MODE_48; else if (lcb_crc == LCB_CRC_14B) port_ltp = PORT_LTP_CRC_MODE_14; else port_ltp = PORT_LTP_CRC_MODE_16; return port_ltp; } static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) { if (ppd->pkeys[2] != 0) { ppd->pkeys[2] = 0; (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); hfi1_event_pkey_change(ppd->dd, ppd->port); } } /* * Convert the given link width to the OPA link width bitmask. */ static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width) { switch (width) { case 0: /* * Simulator and quick linkup do not set the width. * Just set it to 4x without complaint. */ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) return OPA_LINK_WIDTH_4X; return 0; /* no lanes up */ case 1: return OPA_LINK_WIDTH_1X; case 2: return OPA_LINK_WIDTH_2X; case 3: return OPA_LINK_WIDTH_3X; case 4: return OPA_LINK_WIDTH_4X; default: dd_dev_info(dd, "%s: invalid width %d, using 4\n", __func__, width); return OPA_LINK_WIDTH_4X; } } /* * Do a population count on the bottom nibble. */ static const u8 bit_counts[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 }; static inline u8 nibble_to_count(u8 nibble) { return bit_counts[nibble & 0xf]; } /* * Read the active lane information from the 8051 registers and return * their widths. * * Active lane information is found in these 8051 registers: * enable_lane_tx * enable_lane_rx */ static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, u16 *rx_width) { u16 tx, rx; u8 enable_lane_rx; u8 enable_lane_tx; u8 tx_polarity_inversion; u8 rx_polarity_inversion; u8 max_rate; /* read the active lanes */ read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, &rx_polarity_inversion, &max_rate); read_local_lni(dd, &enable_lane_rx); /* convert to counts */ tx = nibble_to_count(enable_lane_tx); rx = nibble_to_count(enable_lane_rx); /* * Set link_speed_active here, overriding what was set in * handle_verify_cap(). The ASIC 8051 firmware does not correctly * set the max_rate field in handle_verify_cap until v0.19. 
*/ if ((dd->icode == ICODE_RTL_SILICON) && (dd->dc8051_ver < dc8051_ver(0, 19, 0))) { /* max_rate: 0 = 12.5G, 1 = 25G */ switch (max_rate) { case 0: dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; break; case 1: dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; break; default: dd_dev_err(dd, "%s: unexpected max rate %d, using 25Gb\n", __func__, (int)max_rate); dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; break; } } dd_dev_info(dd, "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n", enable_lane_tx, tx, enable_lane_rx, rx); *tx_width = link_width_to_bits(dd, tx); *rx_width = link_width_to_bits(dd, rx); } /* * Read verify_cap_local_fm_link_width[1] to obtain the link widths. * Valid after the end of VerifyCap and during LinkUp. Does not change * after link up. I.e. look elsewhere for downgrade information. * * Bits are: * + bits [7:4] contain the number of active transmitters * + bits [3:0] contain the number of active receivers * These are numbers 1 through 4 and can be different values if the * link is asymmetric. * * verify_cap_local_fm_link_width[0] retains its original value. */ static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width, u16 *rx_width) { u16 widths, tx, rx; u8 misc_bits, local_flags; u16 active_tx, active_rx; read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths); tx = widths >> 12; rx = (widths >> 8) & 0xf; *tx_width = link_width_to_bits(dd, tx); *rx_width = link_width_to_bits(dd, rx); /* print the active widths */ get_link_widths(dd, &active_tx, &active_rx); } /* * Set ppd->link_width_active and ppd->link_width_downgrade_active using * hardware information when the link first comes up. * * The link width is not available until after VerifyCap.AllFramesReceived * (the trigger for handle_verify_cap), so this is outside that routine * and should be called when the 8051 signals linkup. */ void get_linkup_link_widths(struct hfi1_pportdata *ppd) { u16 tx_width, rx_width; /* get end-of-LNI link widths */ get_linkup_widths(ppd->dd, &tx_width, &rx_width); /* use tx_width as the link is supposed to be symmetric on link up */ ppd->link_width_active = tx_width; /* link width downgrade active (LWD.A) starts out matching LW.A */ ppd->link_width_downgrade_tx_active = ppd->link_width_active; ppd->link_width_downgrade_rx_active = ppd->link_width_active; /* per OPA spec, on link up LWD.E resets to LWD.S */ ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported; /* cache the active egress rate (units {10^6 bits/sec]) */ ppd->current_egress_rate = active_egress_rate(ppd); } /* * Handle a verify capabilities interrupt from the 8051. * * This is a work-queue function outside of the interrupt. 
*/ void handle_verify_cap(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, link_vc_work); struct hfi1_devdata *dd = ppd->dd; u64 reg; u8 power_management; u8 continuous; u8 vcu; u8 vau; u8 z; u16 vl15buf; u16 link_widths; u16 crc_mask; u16 crc_val; u16 device_id; u16 active_tx, active_rx; u8 partner_supported_crc; u8 remote_tx_rate; u8 device_rev; set_link_state(ppd, HLS_VERIFY_CAP); lcb_shutdown(dd, 0); adjust_lcb_for_fpga_serdes(dd); read_vc_remote_phy(dd, &power_management, &continuous); read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf, &partner_supported_crc); read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths); read_remote_device_id(dd, &device_id, &device_rev); /* print the active widths */ get_link_widths(dd, &active_tx, &active_rx); dd_dev_info(dd, "Peer PHY: power management 0x%x, continuous updates 0x%x\n", (int)power_management, (int)continuous); dd_dev_info(dd, "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n", (int)vau, (int)z, (int)vcu, (int)vl15buf, (int)partner_supported_crc); dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n", (u32)remote_tx_rate, (u32)link_widths); dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n", (u32)device_id, (u32)device_rev); /* * The peer vAU value just read is the peer receiver value. HFI does * not support a transmit vAU of 0 (AU == 8). We advertised that * with Z=1 in the fabric capabilities sent to the peer. The peer * will see our Z=1, and, if it advertised a vAU of 0, will move its * receive to vAU of 1 (AU == 16). Do the same here. We do not care * about the peer Z value - our sent vAU is 3 (hardwired) and is not * subject to the Z value exception. */ if (vau == 0) vau = 1; set_up_vau(dd, vau); /* * Set VL15 credits to 0 in global credit register. Cache remote VL15 * credits value and wait for the link-up interrupt to set it.
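 *
 * handle_link_up() applies the cached value once the link is up:
 *
 *	set_up_vl15(dd, dd->vl15buf_cached);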
*/ set_up_vl15(dd, 0); dd->vl15buf_cached = vl15buf; /* set up the LCB CRC mode */ crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; /* order is important: use the lowest bit in common */ if (crc_mask & CAP_CRC_14B) crc_val = LCB_CRC_14B; else if (crc_mask & CAP_CRC_48B) crc_val = LCB_CRC_48B; else if (crc_mask & CAP_CRC_12B_16B_PER_LANE) crc_val = LCB_CRC_12B_16B_PER_LANE; else crc_val = LCB_CRC_16B; dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val); write_csr(dd, DC_LCB_CFG_CRC_MODE, (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT); /* set (14b only) or clear sideband credit */ reg = read_csr(dd, SEND_CM_CTRL); if (crc_val == LCB_CRC_14B && crc_14b_sideband) { write_csr(dd, SEND_CM_CTRL, reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); } else { write_csr(dd, SEND_CM_CTRL, reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); } ppd->link_speed_active = 0; /* invalid value */ if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { /* remote_tx_rate: 0 = 12.5G, 1 = 25G */ switch (remote_tx_rate) { case 0: ppd->link_speed_active = OPA_LINK_SPEED_12_5G; break; case 1: ppd->link_speed_active = OPA_LINK_SPEED_25G; break; } } else { /* actual rate is highest bit of the ANDed rates */ u8 rate = remote_tx_rate & ppd->local_tx_rate; if (rate & 2) ppd->link_speed_active = OPA_LINK_SPEED_25G; else if (rate & 1) ppd->link_speed_active = OPA_LINK_SPEED_12_5G; } if (ppd->link_speed_active == 0) { dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n", __func__, (int)remote_tx_rate); ppd->link_speed_active = OPA_LINK_SPEED_25G; } /* * Cache the values of the supported, enabled, and active * LTP CRC modes to return in 'portinfo' queries. But the bit * flags that are returned in the portinfo query differ from * what's in the link_crc_mask, crc_sizes, and crc_val * variables. Convert these here. */ ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; /* supported crc modes */ ppd->port_ltp_crc_mode |= cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4; /* enabled crc modes */ ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val); /* active crc mode */ /* set up the remote credit return table */ assign_remote_cm_au_table(dd, vcu); /* * The LCB is reset on entry to handle_verify_cap(), so this must * be applied on every link up. * * Adjust LCB error kill enable to kill the link if * these RBUF errors are seen: * REPLAY_BUF_MBE_SMASK * FLIT_INPUT_BUF_MBE_SMASK */ if (is_ax(dd)) { /* fixed in B0 */ reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN); reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK; write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg); } /* pull LCB fifos out of reset - all fifo clocks must be stable */ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); /* give 8051 access to the LCB CSRs */ write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ set_8051_lcb_access(dd); /* tell the 8051 to go to LinkUp */ set_link_state(ppd, HLS_GOING_UP); } /** * apply_link_downgrade_policy - Apply the link width downgrade enabled * policy against the current active link widths. * @ppd: info of physical Hfi port * @refresh_widths: True indicates link downgrade event * @return: True indicates a successful link downgrade. False indicates * link downgrade event failed and the link will bounce back to * default link width. * * Called when the enabled policy changes or the active link widths * change. * Refresh_widths indicates that a link downgrade occurred. 
The * link_downgraded variable is set by refresh_widths and * determines the success/failure of the policy application. */ bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd, bool refresh_widths) { int do_bounce = 0; int tries; u16 lwde; u16 tx, rx; bool link_downgraded = refresh_widths; /* use the hls lock to avoid a race with actual link up */ tries = 0; retry: mutex_lock(&ppd->hls_lock); /* only apply if the link is up */ if (ppd->host_link_state & HLS_DOWN) { /* still going up..wait and retry */ if (ppd->host_link_state & HLS_GOING_UP) { if (++tries < 1000) { mutex_unlock(&ppd->hls_lock); usleep_range(100, 120); /* arbitrary */ goto retry; } dd_dev_err(ppd->dd, "%s: giving up waiting for link state change\n", __func__); } goto done; } lwde = ppd->link_width_downgrade_enabled; if (refresh_widths) { get_link_widths(ppd->dd, &tx, &rx); ppd->link_width_downgrade_tx_active = tx; ppd->link_width_downgrade_rx_active = rx; } if (ppd->link_width_downgrade_tx_active == 0 || ppd->link_width_downgrade_rx_active == 0) { /* the 8051 reported a dead link as a downgrade */ dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n"); link_downgraded = false; } else if (lwde == 0) { /* downgrade is disabled */ /* bounce if not at starting active width */ if ((ppd->link_width_active != ppd->link_width_downgrade_tx_active) || (ppd->link_width_active != ppd->link_width_downgrade_rx_active)) { dd_dev_err(ppd->dd, "Link downgrade is disabled and link has downgraded, downing link\n"); dd_dev_err(ppd->dd, " original 0x%x, tx active 0x%x, rx active 0x%x\n", ppd->link_width_active, ppd->link_width_downgrade_tx_active, ppd->link_width_downgrade_rx_active); do_bounce = 1; link_downgraded = false; } } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || (lwde & ppd->link_width_downgrade_rx_active) == 0) { /* Tx or Rx is outside the enabled policy */ dd_dev_err(ppd->dd, "Link is outside of downgrade allowed, downing link\n"); dd_dev_err(ppd->dd, " enabled 0x%x, tx active 0x%x, rx active 0x%x\n", lwde, ppd->link_width_downgrade_tx_active, ppd->link_width_downgrade_rx_active); do_bounce = 1; link_downgraded = false; } done: mutex_unlock(&ppd->hls_lock); if (do_bounce) { set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0, OPA_LINKDOWN_REASON_WIDTH_POLICY); set_link_state(ppd, HLS_DN_OFFLINE); start_link(ppd); } return link_downgraded; } /* * Handle a link downgrade interrupt from the 8051. * * This is a work-queue function outside of the interrupt. 
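 *
 * Note that handle_8051_interrupt() drops the downgrade work when the same
 * interrupt also reports LINK_GOING_DOWN (illustrative excerpt):
 *
 *	if (host_msg & LINK_WIDTH_DOWNGRADED) {
 *		host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
 *		extra = " (ignoring downgrade)";
 *	}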
*/ void handle_link_downgrade(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, link_downgrade_work); dd_dev_info(ppd->dd, "8051: Link width downgrade\n"); if (apply_link_downgrade_policy(ppd, true)) update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active); } static char *dcc_err_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, dcc_err_flags, ARRAY_SIZE(dcc_err_flags)); } static char *lcb_err_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, lcb_err_flags, ARRAY_SIZE(lcb_err_flags)); } static char *dc8051_err_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, dc8051_err_flags, ARRAY_SIZE(dc8051_err_flags)); } static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, dc8051_info_err_flags, ARRAY_SIZE(dc8051_info_err_flags)); } static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags, ARRAY_SIZE(dc8051_info_host_msg_flags)); } static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) { struct hfi1_pportdata *ppd = dd->pport; u64 info, err, host_msg; int queue_link_down = 0; char buf[96]; /* look at the flags */ if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) { /* 8051 information set by firmware */ /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */ info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051); err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT) & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK; host_msg = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT) & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK; /* * Handle error flags. */ if (err & FAILED_LNI) { /* * LNI error indications are cleared by the 8051 * only when starting polling. Only pay attention * to them when in the states that occur during * LNI. */ if (ppd->host_link_state & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { queue_link_down = 1; dd_dev_info(dd, "Link error: %s\n", dc8051_info_err_string(buf, sizeof(buf), err & FAILED_LNI)); } err &= ~(u64)FAILED_LNI; } /* unknown frames can happen during LNI, just count */ if (err & UNKNOWN_FRAME) { ppd->unknown_frame_count++; err &= ~(u64)UNKNOWN_FRAME; } if (err) { /* report remaining errors, but do not do anything */ dd_dev_err(dd, "8051 info error: %s\n", dc8051_info_err_string(buf, sizeof(buf), err)); } /* * Handle host message flags. */ if (host_msg & HOST_REQ_DONE) { /* * Presently, the driver does a busy wait for * host requests to complete. This is only an * informational message. * NOTE: The 8051 clears the host message * information *on the next 8051 command*. * Therefore, when linkup is achieved, * this flag will still be set.
*/ host_msg &= ~(u64)HOST_REQ_DONE; } if (host_msg & BC_SMA_MSG) { queue_work(ppd->link_wq, &ppd->sma_message_work); host_msg &= ~(u64)BC_SMA_MSG; } if (host_msg & LINKUP_ACHIEVED) { dd_dev_info(dd, "8051: Link up\n"); queue_work(ppd->link_wq, &ppd->link_up_work); host_msg &= ~(u64)LINKUP_ACHIEVED; } if (host_msg & EXT_DEVICE_CFG_REQ) { handle_8051_request(ppd); host_msg &= ~(u64)EXT_DEVICE_CFG_REQ; } if (host_msg & VERIFY_CAP_FRAME) { queue_work(ppd->link_wq, &ppd->link_vc_work); host_msg &= ~(u64)VERIFY_CAP_FRAME; } if (host_msg & LINK_GOING_DOWN) { const char *extra = ""; /* no downgrade action needed if going down */ if (host_msg & LINK_WIDTH_DOWNGRADED) { host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED; extra = " (ignoring downgrade)"; } dd_dev_info(dd, "8051: Link down%s\n", extra); queue_link_down = 1; host_msg &= ~(u64)LINK_GOING_DOWN; } if (host_msg & LINK_WIDTH_DOWNGRADED) { queue_work(ppd->link_wq, &ppd->link_downgrade_work); host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED; } if (host_msg) { /* report remaining messages, but do not do anything */ dd_dev_info(dd, "8051 info host message: %s\n", dc8051_info_host_msg_string(buf, sizeof(buf), host_msg)); } reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK; } if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) { /* * Lost the 8051 heartbeat. If this happens, we * receive constant interrupts about it. Disable * the interrupt after the first. */ dd_dev_err(dd, "Lost 8051 heartbeat\n"); write_csr(dd, DC_DC8051_ERR_EN, read_csr(dd, DC_DC8051_ERR_EN) & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK); reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK; } if (reg) { /* report the error, but do not do anything */ dd_dev_err(dd, "8051 error: %s\n", dc8051_err_string(buf, sizeof(buf), reg)); } if (queue_link_down) { /* * if the link is already going down or disabled, do not * queue another. If there's a link down entry already * queued, don't queue another one. */ if ((ppd->host_link_state & (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) || ppd->link_enabled == 0) { dd_dev_info(dd, "%s: not queuing link down. 
host_link_state %x, link_enabled %x\n", __func__, ppd->host_link_state, ppd->link_enabled); } else { if (xchg(&ppd->is_link_down_queued, 1) == 1) dd_dev_info(dd, "%s: link down request already queued\n", __func__); else queue_work(ppd->link_wq, &ppd->link_down_work); } } } static const char * const fm_config_txt[] = { [0] = "BadHeadDist: Distance violation between two head flits", [1] = "BadTailDist: Distance violation between two tail flits", [2] = "BadCtrlDist: Distance violation between two credit control flits", [3] = "BadCrdAck: Credits return for unsupported VL", [4] = "UnsupportedVLMarker: Received VL Marker", [5] = "BadPreempt: Exceeded the preemption nesting level", [6] = "BadControlFlit: Received unsupported control flit", /* no 7 */ [8] = "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL", }; static const char * const port_rcv_txt[] = { [1] = "BadPktLen: Illegal PktLen", [2] = "PktLenTooLong: Packet longer than PktLen", [3] = "PktLenTooShort: Packet shorter than PktLen", [4] = "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)", [5] = "BadDLID: Illegal DLID (0, doesn't match HFI)", [6] = "BadL2: Illegal L2 opcode", [7] = "BadSC: Unsupported SC", [9] = "BadRC: Illegal RC", [11] = "PreemptError: Preempting with same VL", [12] = "PreemptVL15: Preempting a VL15 packet", }; #define OPA_LDR_FMCONFIG_OFFSET 16 #define OPA_LDR_PORTRCV_OFFSET 0 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { u64 info, hdr0, hdr1; const char *extra; char buf[96]; struct hfi1_pportdata *ppd = dd->pport; u8 lcl_reason = 0; int do_bounce = 0; if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) { if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) { info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE); dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK; /* set status bit */ dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK; } reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK; } if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) { struct hfi1_pportdata *ppd = dd->pport; /* this counter saturates at (2^32) - 1 */ if (ppd->link_downed < (u32)UINT_MAX) ppd->link_downed++; reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK; } if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) { u8 reason_valid = 1; info = read_csr(dd, DCC_ERR_INFO_FMCONFIG); if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) { dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK; /* set status bit */ dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK; } switch (info) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: extra = fm_config_txt[info]; break; case 8: extra = fm_config_txt[info]; if (ppd->port_error_action & OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) { do_bounce = 1; /* * lcl_reason cannot be derived from info * for this error */ lcl_reason = OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER; } break; default: reason_valid = 0; snprintf(buf, sizeof(buf), "reserved%lld", info); extra = buf; break; } if (reason_valid && !do_bounce) { do_bounce = ppd->port_error_action & (1 << (OPA_LDR_FMCONFIG_OFFSET + info)); lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST; } /* just report this */ dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n", extra); reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK; } if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) { u8 reason_valid = 1; info = read_csr(dd, DCC_ERR_INFO_PORTRCV); hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0); hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1); if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) { 
dd->err_info_rcvport.status_and_code = info & OPA_EI_CODE_SMASK; /* set status bit */ dd->err_info_rcvport.status_and_code |= OPA_EI_STATUS_SMASK; /* * save first 2 flits in the packet that caused * the error */ dd->err_info_rcvport.packet_flit1 = hdr0; dd->err_info_rcvport.packet_flit2 = hdr1; } switch (info) { case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 9: case 11: case 12: extra = port_rcv_txt[info]; break; default: reason_valid = 0; snprintf(buf, sizeof(buf), "reserved%lld", info); extra = buf; break; } if (reason_valid && !do_bounce) { do_bounce = ppd->port_error_action & (1 << (OPA_LDR_PORTRCV_OFFSET + info)); lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0; } /* just report this */ dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n" " hdr0 0x%llx, hdr1 0x%llx\n", extra, hdr0, hdr1); reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK; } if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) { /* informative only */ dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n"); reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK; } if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) { /* informative only */ dd_dev_info_ratelimited(dd, "host access to LCB blocked\n"); reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK; } if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev))) reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK; /* report any remaining errors */ if (reg) dd_dev_info_ratelimited(dd, "DCC Error: %s\n", dcc_err_string(buf, sizeof(buf), reg)); if (lcl_reason == 0) lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN; if (do_bounce) { dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n", __func__); set_link_down_reason(ppd, lcl_reason, 0, lcl_reason); queue_work(ppd->link_wq, &ppd->link_bounce_work); } } static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { char buf[96]; dd_dev_info(dd, "LCB Error: %s\n", lcb_err_string(buf, sizeof(buf), reg)); } /* * CCE block DC interrupt. Source is < 8. */ static void is_dc_int(struct hfi1_devdata *dd, unsigned int source) { const struct err_reg_info *eri = &dc_errs[source]; if (eri->handler) { interrupt_clear_down(dd, 0, eri); } else if (source == 3 /* dc_lbm_int */) { /* * This indicates that a parity error has occurred on the * address/control lines presented to the LBM. The error * is a single pulse, there is no associated error flag, * and it is non-maskable. This is because if a parity * error occurs on the request the request is dropped. * This should never occur, but it is nice to know if it * ever does. */ dd_dev_err(dd, "Parity error in DC LBM block\n"); } else { dd_dev_err(dd, "Invalid DC interrupt %u\n", source); } } /* * TX block send credit interrupt. Source is < 160. */ static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source) { sc_group_release_update(dd, source); } /* * TX block SDMA interrupt. Source is < 48. 
* * SDMA interrupts are grouped by type: * * 0 - N-1 = SDma * N - 2N-1 = SDmaProgress * 2N - 3N-1 = SDmaIdle */ static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source) { /* what interrupt */ unsigned int what = source / TXE_NUM_SDMA_ENGINES; /* which engine */ unsigned int which = source % TXE_NUM_SDMA_ENGINES; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which, slashstrip(__FILE__), __LINE__, __func__); sdma_dumpstate(&dd->per_sdma[which]); #endif if (likely(what < 3 && which < dd->num_sdma)) { sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source); } else { /* should not happen */ dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source); } } /** * is_rcv_avail_int() - User receive context available IRQ handler * @dd: valid dd * @source: logical IRQ source (offset from IS_RCVAVAIL_START) * * RX block receive available interrupt. Source is < 160. * * This is the general interrupt handler for user (PSM) receive contexts, * and can only be used for non-threaded IRQs. */ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source) { struct hfi1_ctxtdata *rcd; char *err_detail; if (likely(source < dd->num_rcv_contexts)) { rcd = hfi1_rcd_get_by_index(dd, source); if (rcd) { handle_user_interrupt(rcd); hfi1_rcd_put(rcd); return; /* OK */ } /* received an interrupt, but no rcd */ err_detail = "dataless"; } else { /* received an interrupt, but are not using that context */ err_detail = "out of range"; } dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n", err_detail, source); } /** * is_rcv_urgent_int() - User receive context urgent IRQ handler * @dd: valid dd * @source: logical IRQ source (offset from IS_RCVURGENT_START) * * RX block receive urgent interrupt. Source is < 160. * * NOTE: kernel receive contexts specifically do NOT enable this IRQ. */ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source) { struct hfi1_ctxtdata *rcd; char *err_detail; if (likely(source < dd->num_rcv_contexts)) { rcd = hfi1_rcd_get_by_index(dd, source); if (rcd) { handle_user_interrupt(rcd); hfi1_rcd_put(rcd); return; /* OK */ } /* received an interrupt, but no rcd */ err_detail = "dataless"; } else { /* received an interrupt, but are not using that context */ err_detail = "out of range"; } dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n", err_detail, source); } /* * Reserved range interrupt. Should not be called in normal operation. 
*/ static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source) { char name[64]; dd_dev_err(dd, "unexpected %s interrupt\n", is_reserved_name(name, sizeof(name), source)); } static const struct is_table is_table[] = { /* * start end * name func interrupt func */ { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END, is_misc_err_name, is_misc_err_int }, { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END, is_sdma_eng_err_name, is_sdma_eng_err_int }, { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, is_sendctxt_err_name, is_sendctxt_err_int }, { IS_SDMA_START, IS_SDMA_IDLE_END, is_sdma_eng_name, is_sdma_eng_int }, { IS_VARIOUS_START, IS_VARIOUS_END, is_various_name, is_various_int }, { IS_DC_START, IS_DC_END, is_dc_name, is_dc_int }, { IS_RCVAVAIL_START, IS_RCVAVAIL_END, is_rcv_avail_name, is_rcv_avail_int }, { IS_RCVURGENT_START, IS_RCVURGENT_END, is_rcv_urgent_name, is_rcv_urgent_int }, { IS_SENDCREDIT_START, IS_SENDCREDIT_END, is_send_credit_name, is_send_credit_int}, { IS_RESERVED_START, IS_RESERVED_END, is_reserved_name, is_reserved_int}, }; /* * Interrupt source interrupt - called when the given source has an interrupt. * Source is a bit index into an array of 64-bit integers. */ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source) { const struct is_table *entry; /* avoids a double compare by walking the table in-order */ for (entry = &is_table[0]; entry->is_name; entry++) { if (source <= entry->end) { trace_hfi1_interrupt(dd, entry, source); entry->is_int(dd, source - entry->start); return; } } /* fell off the end */ dd_dev_err(dd, "invalid interrupt source %u\n", source); } /** * general_interrupt - General interrupt handler * @irq: MSIx IRQ vector * @data: hfi1 devdata * * This is able to correctly handle all non-threaded interrupts. Receive * context DATA IRQs are threaded and are not supported by this handler. * */ irqreturn_t general_interrupt(int irq, void *data) { struct hfi1_devdata *dd = data; u64 regs[CCE_NUM_INT_CSRS]; u32 bit; int i; irqreturn_t handled = IRQ_NONE; this_cpu_inc(*dd->int_counter); /* phase 1: scan and clear all handled interrupts */ for (i = 0; i < CCE_NUM_INT_CSRS; i++) { if (dd->gi_mask[i] == 0) { regs[i] = 0; /* used later */ continue; } regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) & dd->gi_mask[i]; /* only clear if anything is set */ if (regs[i]) write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]); } /* phase 2: call the appropriate handler */ for_each_set_bit(bit, (unsigned long *)&regs[0], CCE_NUM_INT_CSRS * 64) { is_interrupt(dd, bit); handled = IRQ_HANDLED; } return handled; } irqreturn_t sdma_interrupt(int irq, void *data) { struct sdma_engine *sde = data; struct hfi1_devdata *dd = sde->dd; u64 status; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); sdma_dumpstate(sde); #endif this_cpu_inc(*dd->int_counter); /* This read_csr is really bad in the hot path */ status = read_csr(dd, CCE_INT_STATUS + (8 * (IS_SDMA_START / 64))) & sde->imask; if (likely(status)) { /* clear the interrupt(s) */ write_csr(dd, CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)), status); /* handle the interrupt(s) */ sdma_engine_interrupt(sde, status); } else { dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n", sde->this_idx); } return IRQ_HANDLED; } /* * Clear the receive interrupt. Use a read of the interrupt clear CSR * to insure that the write completed. This does NOT guarantee that * queued DMA writes to memory from the chip are pushed. 
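 *
 * The read-back is the usual posted-write flush idiom used elsewhere in
 * this file, e.g. lcb_shutdown():
 *
 *	(void)read_csr(dd, DCC_CFG_RESET);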
*/ static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd) { struct hfi1_devdata *dd = rcd->dd; u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg); write_csr(dd, addr, rcd->imask); /* force the above write on the chip and get a value back */ (void)read_csr(dd, addr); } /* force the receive interrupt */ void force_recv_intr(struct hfi1_ctxtdata *rcd) { write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); } /* * Return non-zero if a packet is present. * * This routine is called when rechecking for packets after the RcvAvail * interrupt has been cleared down. First, do a quick check of memory for * a packet present. If not found, use an expensive CSR read of the context * tail to determine the actual tail. The CSR read is necessary because there * is no method to push pending DMAs to memory other than an interrupt and we * are trying to determine if we need to force an interrupt. */ static inline int check_packet_present(struct hfi1_ctxtdata *rcd) { u32 tail; if (hfi1_packet_present(rcd)) return 1; /* fall back to a CSR read, correct indpendent of DMA_RTAIL */ tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); return hfi1_rcd_head(rcd) != tail; } /* * Common code for receive contexts interrupt handlers. * Update traces, increment kernel IRQ counter and * setup ASPM when needed. */ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd) { struct hfi1_devdata *dd = rcd->dd; trace_hfi1_receive_interrupt(dd, rcd); this_cpu_inc(*dd->int_counter); aspm_ctx_disable(rcd); } /* * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt * when there are packets present in the queue. When calling * with interrupts enabled please use hfi1_rcd_eoi_intr. * * @rcd: valid receive context */ static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) { if (!rcd->rcvhdrq) return; clear_recv_intr(rcd); if (check_packet_present(rcd)) force_recv_intr(rcd); } /** * hfi1_rcd_eoi_intr() - End of Interrupt processing action * * @rcd: Ptr to hfi1_ctxtdata of receive context * * Hold IRQs so we can safely clear the interrupt and * recheck for a packet that may have arrived after the previous * check and the interrupt clear. If a packet arrived, force another * interrupt. This routine can be called at the end of receive packet * processing in interrupt service routines, interrupt service thread * and softirqs */ static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) { unsigned long flags; local_irq_save(flags); __hfi1_rcd_eoi_intr(rcd); local_irq_restore(flags); } /** * hfi1_netdev_rx_napi - napi poll function to move eoi inline * @napi: pointer to napi object * @budget: netdev budget */ int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget) { struct hfi1_netdev_rxq *rxq = container_of(napi, struct hfi1_netdev_rxq, napi); struct hfi1_ctxtdata *rcd = rxq->rcd; int work_done = 0; work_done = rcd->do_interrupt(rcd, budget); if (work_done < budget) { napi_complete_done(napi, work_done); hfi1_rcd_eoi_intr(rcd); } return work_done; } /* Receive packet napi handler for netdevs VNIC and AIP */ irqreturn_t receive_context_interrupt_napi(int irq, void *data) { struct hfi1_ctxtdata *rcd = data; receive_interrupt_common(rcd); if (likely(rcd->napi)) { if (likely(napi_schedule_prep(rcd->napi))) __napi_schedule_irqoff(rcd->napi); else __hfi1_rcd_eoi_intr(rcd); } else { WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n", rcd->ctxt); __hfi1_rcd_eoi_intr(rcd); } return IRQ_HANDLED; } /* * Receive packet IRQ handler. This routine expects to be on its own IRQ. 
* This routine will try to handle packets immediately (latency), but if * it finds too many, it will invoke the thread handler (bandwidth). The * chip receive interrupt is *not* cleared down until this or the thread (if * invoked) is finished. The intent is to avoid extra interrupts while we * are processing packets anyway. */ irqreturn_t receive_context_interrupt(int irq, void *data) { struct hfi1_ctxtdata *rcd = data; int disposition; receive_interrupt_common(rcd); /* receive interrupt remains blocked while processing packets */ disposition = rcd->do_interrupt(rcd, 0); /* * Too many packets were seen while processing packets in this * IRQ handler. Invoke the handler thread. The receive interrupt * remains blocked. */ if (disposition == RCV_PKT_LIMIT) return IRQ_WAKE_THREAD; __hfi1_rcd_eoi_intr(rcd); return IRQ_HANDLED; } /* * Receive packet thread handler. This expects to be invoked with the * receive interrupt still blocked. */ irqreturn_t receive_context_thread(int irq, void *data) { struct hfi1_ctxtdata *rcd = data; /* receive interrupt is still blocked from the IRQ handler */ (void)rcd->do_interrupt(rcd, 1); hfi1_rcd_eoi_intr(rcd); return IRQ_HANDLED; } /* ========================================================================= */ u32 read_physical_state(struct hfi1_devdata *dd) { u64 reg; reg = read_csr(dd, DC_DC8051_STS_CUR_STATE); return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT) & DC_DC8051_STS_CUR_STATE_PORT_MASK; } u32 read_logical_state(struct hfi1_devdata *dd) { u64 reg; reg = read_csr(dd, DCC_CFG_PORT_CONFIG); return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT) & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK; } static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate) { u64 reg; reg = read_csr(dd, DCC_CFG_PORT_CONFIG); /* clear current state, set new state */ reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK; reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT; write_csr(dd, DCC_CFG_PORT_CONFIG, reg); } /* * Use the 8051 to read an LCB CSR. */ static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data) { u32 regno; int ret; if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { if (acquire_lcb_access(dd, 0) == 0) { *data = read_csr(dd, addr); release_lcb_access(dd, 0); return 0; } return -EBUSY; } /* register is an index of LCB registers: (offset - base) / 8 */ regno = (addr - DC_LCB_CFG_RUN) >> 3; ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data); if (ret != HCMD_SUCCESS) return -EBUSY; return 0; } /* * Provide a cache for some of the LCB registers in case the LCB is * unavailable. * (The LCB is unavailable in certain link states, for example.) */ struct lcb_datum { u32 off; u64 val; }; static struct lcb_datum lcb_cache[] = { { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0}, { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 }, { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 }, }; static void update_lcb_cache(struct hfi1_devdata *dd) { int i; int ret; u64 val; for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { ret = read_lcb_csr(dd, lcb_cache[i].off, &val); /* Update if we get good data */ if (likely(ret != -EBUSY)) lcb_cache[i].val = val; } } static int read_lcb_cache(u32 off, u64 *val) { int i; for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { if (lcb_cache[i].off == off) { *val = lcb_cache[i].val; return 0; } } pr_warn("%s bad offset 0x%x\n", __func__, off); return -1; } /* * Read an LCB CSR. Access may not be in host control, so check. * Return 0 on success, -EBUSY on failure.
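 *
 * Callers must tolerate -EBUSY; update_lcb_cache() above shows the pattern:
 *
 *	ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
 *	if (likely(ret != -EBUSY))
 *		lcb_cache[i].val = val;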
*/ int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data) { struct hfi1_pportdata *ppd = dd->pport; /* if up, go through the 8051 for the value */ if (ppd->host_link_state & HLS_UP) return read_lcb_via_8051(dd, addr, data); /* if going up or down, check the cache, otherwise, no access */ if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) { if (read_lcb_cache(addr, data)) return -EBUSY; return 0; } /* otherwise, host has access */ *data = read_csr(dd, addr); return 0; } /* * Use the 8051 to write a LCB CSR. */ static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data) { u32 regno; int ret; if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || (dd->dc8051_ver < dc8051_ver(0, 20, 0))) { if (acquire_lcb_access(dd, 0) == 0) { write_csr(dd, addr, data); release_lcb_access(dd, 0); return 0; } return -EBUSY; } /* register is an index of LCB registers: (offset - base) / 8 */ regno = (addr - DC_LCB_CFG_RUN) >> 3; ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data); if (ret != HCMD_SUCCESS) return -EBUSY; return 0; } /* * Write an LCB CSR. Access may not be in host control, so check. * Return 0 on success, -EBUSY on failure. */ int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data) { struct hfi1_pportdata *ppd = dd->pport; /* if up, go through the 8051 for the value */ if (ppd->host_link_state & HLS_UP) return write_lcb_via_8051(dd, addr, data); /* if going up or down, no access */ if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) return -EBUSY; /* otherwise, host has access */ write_csr(dd, addr, data); return 0; } /* * Returns: * < 0 = Linux error, not able to get access * > 0 = 8051 command RETURN_CODE */ static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data, u64 *out_data) { u64 reg, completed; int return_code; unsigned long timeout; hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data); mutex_lock(&dd->dc8051_lock); /* We can't send any commands to the 8051 if it's in reset */ if (dd->dc_shutdown) { return_code = -ENODEV; goto fail; } /* * If an 8051 host command timed out previously, then the 8051 is * stuck. * * On first timeout, attempt to reset and restart the entire DC * block (including 8051). (Is this too big of a hammer?) * * If the 8051 times out a second time, the reset did not bring it * back to healthy life. In that case, fail any subsequent commands. */ if (dd->dc8051_timed_out) { if (dd->dc8051_timed_out > 1) { dd_dev_err(dd, "Previous 8051 host command timed out, skipping command %u\n", type); return_code = -ENXIO; goto fail; } _dc_shutdown(dd); _dc_start(dd); } /* * If there is no timeout, then the 8051 command interface is * waiting for a command. */ /* * When writing a LCB CSR, out_data contains the full value to * be written, while in_data contains the relative LCB * address in 7:0. 
Do the work here, rather than the caller, * of distrubting the write data to where it needs to go: * * Write data * 39:00 -> in_data[47:8] * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA */ if (type == HCMD_WRITE_LCB_CSR) { in_data |= ((*out_data) & 0xffffffffffull) << 8; /* must preserve COMPLETED - it is tied to hardware */ reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0); reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK; reg |= ((((*out_data) >> 40) & 0xff) << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT) | ((((*out_data) >> 48) & 0xffff) << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT); write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg); } /* * Do two writes: the first to stabilize the type and req_data, the * second to activate. */ reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK) << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK) << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT; write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg); reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK; write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg); /* wait for completion, alternate: interrupt */ timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT); while (1) { reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1); completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK; if (completed) break; if (time_after(jiffies, timeout)) { dd->dc8051_timed_out++; dd_dev_err(dd, "8051 host command %u timeout\n", type); if (out_data) *out_data = 0; return_code = -ETIMEDOUT; goto fail; } udelay(2); } if (out_data) { *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT) & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK; if (type == HCMD_READ_LCB_CSR) { /* top 16 bits are in a different register */ *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1) & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK) << (48 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT); } } return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT) & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK; dd->dc8051_timed_out = 0; /* * Clear command for next user. */ write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0); fail: mutex_unlock(&dd->dc8051_lock); return return_code; } static int set_physical_link_state(struct hfi1_devdata *dd, u64 state) { return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL); } int load_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, u32 config_data) { u64 data; int ret; data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT | (u64)config_data << LOAD_DATA_DATA_SHIFT; ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "load 8051 config: field id %d, lane %d, err %d\n", (int)field_id, (int)lane_id, ret); } return ret; } /* * Read the 8051 firmware "registers". Use the RAM directly. Always * set the result, even on error. 
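 *
 * Worked example (derived from the code below, assuming GENERAL_CONFIG
 * selects the non-lane fields, i.e. lane_id >= 4): field_id 3 gives
 * addr = 12, the 8-byte read is truncated down to offset 8, and because
 * addr & 0x4 is set the upper 32 bits of big_data are returned.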
* Return 0 on success, -errno on failure */ int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, u32 *result) { u64 big_data; u32 addr; int ret; /* address start depends on the lane_id */ if (lane_id < 4) addr = (4 * NUM_GENERAL_FIELDS) + (lane_id * 4 * NUM_LANE_FIELDS); else addr = 0; addr += field_id * 4; /* read is in 8-byte chunks, hardware will truncate the address down */ ret = read_8051_data(dd, addr, 8, &big_data); if (ret == 0) { /* extract the 4 bytes we want */ if (addr & 0x4) *result = (u32)(big_data >> 32); else *result = (u32)big_data; } else { *result = 0; dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n", __func__, lane_id, field_id); } return ret; } static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management, u8 continuous) { u32 frame; frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT | power_management << POWER_MANAGEMENT_SHIFT; return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY, GENERAL_CONFIG, frame); } static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu, u16 vl15buf, u8 crc_sizes) { u32 frame; frame = (u32)vau << VAU_SHIFT | (u32)z << Z_SHIFT | (u32)vcu << VCU_SHIFT | (u32)vl15buf << VL15BUF_SHIFT | (u32)crc_sizes << CRC_SIZES_SHIFT; return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC, GENERAL_CONFIG, frame); } static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits, u8 *flag_bits, u16 *link_widths) { u32 frame; read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, &frame); *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK; *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK; *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; } static int write_vc_local_link_mode(struct hfi1_devdata *dd, u8 misc_bits, u8 flag_bits, u16 link_widths) { u32 frame; frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT | (u32)link_widths << LINK_WIDTH_SHIFT; return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, frame); } static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id, u8 device_rev) { u32 frame; frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT) | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT); return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame); } static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id, u8 *device_rev) { u32 frame; read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame); *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK; *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT) & REMOTE_DEVICE_REV_MASK; } int write_host_interface_version(struct hfi1_devdata *dd, u8 version) { u32 frame; u32 mask; mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT); read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame); /* Clear, then set field */ frame &= ~mask; frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT); return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, frame); } void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor, u8 *ver_patch) { u32 frame; read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame); *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) & STS_FM_VERSION_MAJOR_MASK; *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) & STS_FM_VERSION_MINOR_MASK; read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame); *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) & STS_FM_VERSION_PATCH_MASK; } static void 
read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management, u8 *continuous) { u32 frame; read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame); *power_management = (frame >> POWER_MANAGEMENT_SHIFT) & POWER_MANAGEMENT_MASK; *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT) & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK; } static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z, u8 *vcu, u16 *vl15buf, u8 *crc_sizes) { u32 frame; read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame); *vau = (frame >> VAU_SHIFT) & VAU_MASK; *z = (frame >> Z_SHIFT) & Z_MASK; *vcu = (frame >> VCU_SHIFT) & VCU_MASK; *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK; *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK; } static void read_vc_remote_link_width(struct hfi1_devdata *dd, u8 *remote_tx_rate, u16 *link_widths) { u32 frame; read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG, &frame); *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT) & REMOTE_TX_RATE_MASK; *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; } static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx) { u32 frame; read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame); *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK; } static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls) { read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls); } static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs) { read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs); } void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality) { u32 frame; int ret; *link_quality = 0; if (dd->pport->host_link_state & HLS_UP) { ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame); if (ret == 0) *link_quality = (frame >> LINK_QUALITY_SHIFT) & LINK_QUALITY_MASK; } } static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc) { u32 frame; read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame); *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK; } static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr) { u32 frame; read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame); *ldr = (frame & 0xff); } static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx, u8 *tx_polarity_inversion, u8 *rx_polarity_inversion, u8 *max_rate) { u32 frame; int ret; ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame); *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT) & ENABLE_LANE_TX_MASK; *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT) & TX_POLARITY_INVERSION_MASK; *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT) & RX_POLARITY_INVERSION_MASK; *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK; return ret; } static int write_tx_settings(struct hfi1_devdata *dd, u8 enable_lane_tx, u8 tx_polarity_inversion, u8 rx_polarity_inversion, u8 max_rate) { u32 frame; /* no need to mask, all variable sizes match field widths */ frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT | max_rate << MAX_RATE_SHIFT; return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame); } /* * Read an idle LCB message. 
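* The 64-bit response carries both the message type and the payload; since the caller already knows the type it asked for, only the payload (shifted down by IDLE_PAYLOAD_SHIFT) is handed back.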
* * Returns 0 on success, -EINVAL on error */ static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out) { int ret; ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "read idle message: type %d, err %d\n", (u32)type, ret); return -EINVAL; } dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out); /* return only the payload as we already know the type */ *data_out >>= IDLE_PAYLOAD_SHIFT; return 0; } /* * Read an idle SMA message. To be done in response to a notification from * the 8051. * * Returns 0 on success, -EINVAL on error */ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data) { return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data); } /* * Send an idle LCB message. * * Returns 0 on success, -EINVAL on error */ static int send_idle_message(struct hfi1_devdata *dd, u64 data) { int ret; dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data); ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n", data, ret); return -EINVAL; } return 0; } /* * Send an idle SMA message. * * Returns 0 on success, -EINVAL on error */ int send_idle_sma(struct hfi1_devdata *dd, u64 message) { u64 data; data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT); return send_idle_message(dd, data); } /* * Initialize the LCB then do a quick link up. This may or may not be * in loopback. * * return 0 on success, -errno on error */ static int do_quick_linkup(struct hfi1_devdata *dd) { int ret; lcb_shutdown(dd, 0); if (loopback) { /* LCB_CFG_LOOPBACK.VAL = 2 */ /* LCB_CFG_LANE_WIDTH.VAL = 0 */ write_csr(dd, DC_LCB_CFG_LOOPBACK, IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT); write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); } /* start the LCBs */ /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); /* simulator only loopback steps */ if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { /* LCB_CFG_RUN.EN = 1 */ write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT); ret = wait_link_transfer_active(dd, 10); if (ret) return ret; write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT); } if (!loopback) { /* * When doing quick linkup and not in loopback, both * sides must be done with LCB set-up before either * starts the quick linkup. Put a delay here so that * both sides can be started and have a chance to be * done with LCB set up before resuming. */ dd_dev_err(dd, "Pausing for peer to be finished with LCB set up\n"); msleep(5000); dd_dev_err(dd, "Continuing with quick linkup\n"); } write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ set_8051_lcb_access(dd); /* * State "quick" LinkUp request sets the physical link state to * LinkUp without a verify capability sequence. * This state is in simulator v37 and later. */ ret = set_physical_link_state(dd, PLS_QUICK_LINKUP); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "%s: set physical link state to quick LinkUp failed with return %d\n", __func__, ret); set_host_lcb_access(dd); write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ if (ret >= 0) ret = -EINVAL; return ret; } return 0; /* success */ } /* * Do all special steps to set up loopback. 
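* The "loopback" and "quick_linkup" symbols referenced below are file-scope settings in this driver rather than arguments; the routine maps the requested loopback mode (SerDes, LCB, or cable) onto them, with the functional simulator always folded into LCB loopback plus quick linkup.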
*/ static int init_loopback(struct hfi1_devdata *dd) { dd_dev_info(dd, "Entering loopback mode\n"); /* all loopbacks should disable self GUID check */ write_csr(dd, DC_DC8051_CFG_MODE, (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK)); /* * The simulator has only one loopback option - LCB. Switch * to that option, which includes quick link up. * * Accept all valid loopback values. */ if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB || loopback == LOOPBACK_CABLE)) { loopback = LOOPBACK_LCB; quick_linkup = 1; return 0; } /* * SerDes loopback init sequence is handled in set_local_link_attributes */ if (loopback == LOOPBACK_SERDES) return 0; /* LCB loopback - handled at poll time */ if (loopback == LOOPBACK_LCB) { quick_linkup = 1; /* LCB is always quick linkup */ /* not supported in emulation due to emulation RTL changes */ if (dd->icode == ICODE_FPGA_EMULATION) { dd_dev_err(dd, "LCB loopback not supported in emulation\n"); return -EINVAL; } return 0; } /* external cable loopback requires no extra steps */ if (loopback == LOOPBACK_CABLE) return 0; dd_dev_err(dd, "Invalid loopback mode %d\n", loopback); return -EINVAL; } /* * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits * used in the Verify Capability link width attribute. */ static u16 opa_to_vc_link_widths(u16 opa_widths) { int i; u16 result = 0; static const struct link_bits { u16 from; u16 to; } opa_link_xlate[] = { { OPA_LINK_WIDTH_1X, 1 << (1 - 1) }, { OPA_LINK_WIDTH_2X, 1 << (2 - 1) }, { OPA_LINK_WIDTH_3X, 1 << (3 - 1) }, { OPA_LINK_WIDTH_4X, 1 << (4 - 1) }, }; for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) { if (opa_widths & opa_link_xlate[i].from) result |= opa_link_xlate[i].to; } return result; } /* * Set link attributes before moving to polling. */ static int set_local_link_attributes(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u8 enable_lane_tx; u8 tx_polarity_inversion; u8 rx_polarity_inversion; int ret; u32 misc_bits = 0; /* reset our fabric serdes to clear any lingering problems */ fabric_serdes_reset(dd); /* set the local tx rate - need to read-modify-write */ ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, &rx_polarity_inversion, &ppd->local_tx_rate); if (ret) goto set_local_link_attributes_fail; if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { /* set the tx rate to the fastest enabled */ if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) ppd->local_tx_rate = 1; else ppd->local_tx_rate = 0; } else { /* set the tx rate to all enabled */ ppd->local_tx_rate = 0; if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) ppd->local_tx_rate |= 2; if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G) ppd->local_tx_rate |= 1; } enable_lane_tx = 0xF; /* enable all four lanes */ ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion, rx_polarity_inversion, ppd->local_tx_rate); if (ret != HCMD_SUCCESS) goto set_local_link_attributes_fail; ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "Failed to set host interface version, return 0x%x\n", ret); goto set_local_link_attributes_fail; } /* * DC supports continuous updates. 
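* The call below encodes that into the VERIFY_CAP_LOCAL_PHY frame: power_management = 0 and continuous = 1, which write_vc_local_phy() shifts into POWER_MANAGEMENT_SHIFT and CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT (spelling as in the register definitions used by this file).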
*/ ret = write_vc_local_phy(dd, 0 /* no power management */, 1 /* continuous updates */); if (ret != HCMD_SUCCESS) goto set_local_link_attributes_fail; /* z=1 in the next call: AU of 0 is not supported by the hardware */ ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init, ppd->port_crc_mode_enabled); if (ret != HCMD_SUCCESS) goto set_local_link_attributes_fail; /* * SerDes loopback init sequence requires * setting bit 0 of MISC_CONFIG_BITS */ if (loopback == LOOPBACK_SERDES) misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT; /* * An external device configuration request is used to reset the LCB * to retry to obtain operational lanes when the first attempt is * unsuccessful. */ if (dd->dc8051_ver >= dc8051_ver(1, 25, 0)) misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT; ret = write_vc_local_link_mode(dd, misc_bits, 0, opa_to_vc_link_widths( ppd->link_width_enabled)); if (ret != HCMD_SUCCESS) goto set_local_link_attributes_fail; /* let peer know who we are */ ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev); if (ret == HCMD_SUCCESS) return 0; set_local_link_attributes_fail: dd_dev_err(dd, "Failed to set local link attributes, return 0x%x\n", ret); return ret; } /* * Call this to start the link. * Do not do anything if the link is disabled. * Returns 0 if link is disabled, moved to polling, or the driver is not ready. */ int start_link(struct hfi1_pportdata *ppd) { /* * Tune the SerDes to a ballpark setting for optimal signal and bit * error rate. Needs to be done before starting the link. */ tune_serdes(ppd); if (!ppd->driver_link_ready) { dd_dev_info(ppd->dd, "%s: stopping link start because driver is not ready\n", __func__); return 0; } /* * FULL_MGMT_P_KEY is cleared from the pkey table, so that the * pkey table can be configured properly if the HFI unit is connected * to a switch port with MgmtAllowed=NO */ clear_full_mgmt_pkey(ppd); return set_link_state(ppd, HLS_DN_POLL); } static void wait_for_qsfp_init(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 mask; unsigned long timeout; /* * Some QSFP cables have a quirk that asserts the IntN line as a side * effect of power up on plug-in. We ignore this false positive * interrupt until the module has finished powering up by waiting for * a minimum timeout of the module inrush initialization time of * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the * module have stabilized. */ msleep(500); /* * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1) */ timeout = jiffies + msecs_to_jiffies(2000); while (1) { mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN); if (!(mask & QSFP_HFI0_INT_N)) break; if (time_after(jiffies, timeout)) { dd_dev_info(dd, "%s: No IntN detected, reset complete\n", __func__); break; } udelay(2); } } static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable) { struct hfi1_devdata *dd = ppd->dd; u64 mask; mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK); if (enable) { /* * Clear the status register to avoid an immediate interrupt * when we re-enable the IntN pin */ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N); mask |= (u64)QSFP_HFI0_INT_N; } else { mask &= ~(u64)QSFP_HFI0_INT_N; } write_csr(dd, dd->hfi1_id ?
ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); } int reset_qsfp(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 mask, qsfp_mask; /* Disable INT_N from triggering QSFP interrupts */ set_qsfp_int_n(ppd, 0); /* Reset the QSFP */ mask = (u64)QSFP_HFI0_RESET_N; qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); qsfp_mask &= ~mask; write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); udelay(10); qsfp_mask |= mask; write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); wait_for_qsfp_init(ppd); /* * Allow INT_N to trigger the QSFP interrupt to watch * for alarms and warnings */ set_qsfp_int_n(ppd, 1); /* * After the reset, AOC transmitters are enabled by default. They need * to be turned off to complete the QSFP setup before they can be * enabled again. */ return set_qsfp_tx(ppd, 0); } static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, u8 *qsfp_interrupt_status) { struct hfi1_devdata *dd = ppd->dd; if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) || (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING)) dd_dev_err(dd, "%s: QSFP cable temperature too high\n", __func__); if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) || (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING)) dd_dev_err(dd, "%s: QSFP cable temperature too low\n", __func__); /* * The remaining alarms/warnings don't matter if the link is down. */ if (ppd->host_link_state & HLS_DOWN) return 0; if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) || (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) dd_dev_err(dd, "%s: QSFP supply voltage too high\n", __func__); if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) || (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING)) dd_dev_err(dd, "%s: QSFP supply voltage too low\n", __func__); /* Byte 2 is vendor specific */ if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) || (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING)) dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n", __func__); if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) || (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING)) dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n", __func__); if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) || (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING)) dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n", __func__); if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) || (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING)) dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n", __func__); if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) || (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING)) dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n", __func__); if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) || (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING)) dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n", __func__); if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) || (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING)) dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n", __func__); if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) || (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING)) dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n", __func__); if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) || (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING)) dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n", __func__); if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) || (qsfp_interrupt_status[7] & 
QSFP_LOW_POWER_WARNING)) dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n", __func__); if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) || (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING)) dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n", __func__); if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) || (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING)) dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n", __func__); /* Bytes 9-10 and 11-12 are reserved */ /* Bytes 13-15 are vendor specific */ return 0; } /* This routine will only be scheduled if the QSFP module present is asserted */ void qsfp_event(struct work_struct *work) { struct qsfp_data *qd; struct hfi1_pportdata *ppd; struct hfi1_devdata *dd; qd = container_of(work, struct qsfp_data, qsfp_work); ppd = qd->ppd; dd = ppd->dd; /* Sanity check */ if (!qsfp_mod_present(ppd)) return; if (ppd->host_link_state == HLS_DN_DISABLE) { dd_dev_info(ppd->dd, "%s: stopping link start because link is disabled\n", __func__); return; } /* * Turn DC back on after cable has been re-inserted. Up until * now, the DC has been in reset to save power. */ dc_start(dd); if (qd->cache_refresh_required) { set_qsfp_int_n(ppd, 0); wait_for_qsfp_init(ppd); /* * Allow INT_N to trigger the QSFP interrupt to watch * for alarms and warnings */ set_qsfp_int_n(ppd, 1); start_link(ppd); } if (qd->check_interrupt_flags) { u8 qsfp_interrupt_status[16] = {0,}; if (one_qsfp_read(ppd, dd->hfi1_id, 6, &qsfp_interrupt_status[0], 16) != 16) { dd_dev_info(dd, "%s: Failed to read status of QSFP module\n", __func__); } else { unsigned long flags; handle_qsfp_error_conditions( ppd, qsfp_interrupt_status); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.check_interrupt_flags = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); } } } void init_qsfp_int(struct hfi1_devdata *dd) { struct hfi1_pportdata *ppd = dd->pport; u64 qsfp_mask; qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); /* Clear current status to avoid spurious interrupts */ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, qsfp_mask); write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, qsfp_mask); set_qsfp_int_n(ppd, 0); /* Handle active low nature of INT_N and MODPRST_N pins */ if (qsfp_mod_present(ppd)) qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N; write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, qsfp_mask); /* Enable the appropriate QSFP IRQ source */ if (!dd->hfi1_id) set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true); else set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true); } /* * Do a one-time initialize of the LCB block. */ static void init_lcb(struct hfi1_devdata *dd) { /* simulator does not correctly handle LCB cclk loopback, skip */ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) return; /* the DC has been reset earlier in the driver load */ /* set LCB for cclk loopback on the port */ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01); write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00); write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00); write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08); write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02); write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00); } /* * Perform a test read on the QSFP. Return 0 on success, -ERRNO * on error. 
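* The read below targets byte 2 (the status byte) purely as a presence probe: any successful one-byte read means the module is responding, and the value itself is discarded.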
*/ static int test_qsfp_read(struct hfi1_pportdata *ppd) { int ret; u8 status; /* * Report success if not a QSFP or, if it is a QSFP, but the cable is * not present */ if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd)) return 0; /* read byte 2, the status byte */ ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1); if (ret < 0) return ret; if (ret != 1) return -EIO; return 0; /* success */ } /* * Values for QSFP retry. * * Give up after 10s (20 x 500ms). The overall timeout was empirically * arrived at from experience on a large cluster. */ #define MAX_QSFP_RETRIES 20 #define QSFP_RETRY_WAIT 500 /* msec */ /* * Try a QSFP read. If it fails, schedule a retry for later. * Called on first link activation after driver load. */ static void try_start_link(struct hfi1_pportdata *ppd) { if (test_qsfp_read(ppd)) { /* read failed */ if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) { dd_dev_err(ppd->dd, "QSFP not responding, giving up\n"); return; } dd_dev_info(ppd->dd, "QSFP not responding, waiting and retrying %d\n", (int)ppd->qsfp_retry_count); ppd->qsfp_retry_count++; queue_delayed_work(ppd->link_wq, &ppd->start_link_work, msecs_to_jiffies(QSFP_RETRY_WAIT)); return; } ppd->qsfp_retry_count = 0; start_link(ppd); } /* * Workqueue function to start the link after a delay. */ void handle_start_link(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, start_link_work.work); try_start_link(ppd); } int bringup_serdes(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 guid; int ret; if (HFI1_CAP_IS_KSET(EXTENDED_PSN)) add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK); guid = ppd->guids[HFI1_PORT_GUID_INDEX]; if (!guid) { if (dd->base_guid) guid = dd->base_guid + ppd->port - 1; ppd->guids[HFI1_PORT_GUID_INDEX] = guid; } /* Set linkinit_reason on power up per OPA spec */ ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP; /* one-time init of the LCB */ init_lcb(dd); if (loopback) { ret = init_loopback(dd); if (ret < 0) return ret; } get_port_type(ppd); if (ppd->port_type == PORT_TYPE_QSFP) { set_qsfp_int_n(ppd, 0); wait_for_qsfp_init(ppd); set_qsfp_int_n(ppd, 1); } try_start_link(ppd); return 0; } void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; /* * Shut down the link and keep it down. First turn off that the * driver wants to allow the link to be up (driver_link_ready). * Then make sure the link is not automatically restarted * (link_enabled). Cancel any pending restart. And finally * go offline. 
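* Setting qsfp_retry_count to its maximum below is what keeps try_start_link() from queueing any further delayed restart attempts.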
*/ ppd->driver_link_ready = 0; ppd->link_enabled = 0; ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */ flush_delayed_work(&ppd->start_link_work); cancel_delayed_work_sync(&ppd->start_link_work); ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT); set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0, OPA_LINKDOWN_REASON_REBOOT); set_link_state(ppd, HLS_DN_OFFLINE); /* disable the port */ clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); cancel_work_sync(&ppd->freeze_work); } static inline int init_cpu_counters(struct hfi1_devdata *dd) { struct hfi1_pportdata *ppd; int i; ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { ppd->ibport_data.rvp.rc_acks = NULL; ppd->ibport_data.rvp.rc_qacks = NULL; ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); if (!ppd->ibport_data.rvp.rc_acks || !ppd->ibport_data.rvp.rc_delayed_comp || !ppd->ibport_data.rvp.rc_qacks) return -ENOMEM; } return 0; } /* * index is the index into the receive array */ void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, u32 type, unsigned long pa, u16 order) { u64 reg; if (!(dd->flags & HFI1_PRESENT)) goto done; if (type == PT_INVALID || type == PT_INVALID_FLUSH) { pa = 0; order = 0; } else if (type > PT_INVALID) { dd_dev_err(dd, "unexpected receive array type %u for index %u, not handled\n", type, index); goto done; } trace_hfi1_put_tid(dd, index, type, pa, order); #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */ reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK) << RCV_ARRAY_RT_ADDR_SHIFT; trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg); writeq(reg, dd->rcvarray_wc + (index * 8)); if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3) /* * Eager entries are written and flushed * * Expected entries are flushed every 4 writes */ flush_wc(); done: return; } void hfi1_clear_tids(struct hfi1_ctxtdata *rcd) { struct hfi1_devdata *dd = rcd->dd; u32 i; /* this could be optimized */ for (i = rcd->eager_base; i < rcd->eager_base + rcd->egrbufs.alloced; i++) hfi1_put_tid(dd, i, PT_INVALID, 0, 0); for (i = rcd->expected_base; i < rcd->expected_base + rcd->expected_count; i++) hfi1_put_tid(dd, i, PT_INVALID, 0, 0); } static const char * const ib_cfg_name_strings[] = { "HFI1_IB_CFG_LIDLMC", "HFI1_IB_CFG_LWID_DG_ENB", "HFI1_IB_CFG_LWID_ENB", "HFI1_IB_CFG_LWID", "HFI1_IB_CFG_SPD_ENB", "HFI1_IB_CFG_SPD", "HFI1_IB_CFG_RXPOL_ENB", "HFI1_IB_CFG_LREV_ENB", "HFI1_IB_CFG_LINKLATENCY", "HFI1_IB_CFG_HRTBT", "HFI1_IB_CFG_OP_VLS", "HFI1_IB_CFG_VL_HIGH_CAP", "HFI1_IB_CFG_VL_LOW_CAP", "HFI1_IB_CFG_OVERRUN_THRESH", "HFI1_IB_CFG_PHYERR_THRESH", "HFI1_IB_CFG_LINKDEFAULT", "HFI1_IB_CFG_PKEYS", "HFI1_IB_CFG_MTU", "HFI1_IB_CFG_LSTATE", "HFI1_IB_CFG_VL_HIGH_LIMIT", "HFI1_IB_CFG_PMA_TICKS", "HFI1_IB_CFG_PORT" }; static const char *ib_cfg_name(int which) { if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings)) return "invalid"; return ib_cfg_name_strings[which]; } int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which) { struct hfi1_devdata *dd = ppd->dd; int val = 0; switch (which) { case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */ val = ppd->link_width_enabled; break; case HFI1_IB_CFG_LWID: /* currently active Link-width */ val = ppd->link_width_active; break; case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */ val = ppd->link_speed_enabled; 
break; case HFI1_IB_CFG_SPD: /* current Link speed */ val = ppd->link_speed_active; break; case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */ case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */ case HFI1_IB_CFG_LINKLATENCY: goto unimplemented; case HFI1_IB_CFG_OP_VLS: val = ppd->actual_vls_operational; break; case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */ val = VL_ARB_HIGH_PRIO_TABLE_SIZE; break; case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */ val = VL_ARB_LOW_PRIO_TABLE_SIZE; break; case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ val = ppd->overrun_threshold; break; case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ val = ppd->phy_error_threshold; break; case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ val = HLS_DEFAULT; break; case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */ case HFI1_IB_CFG_PMA_TICKS: default: unimplemented: if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) dd_dev_info( dd, "%s: which %s: not implemented\n", __func__, ib_cfg_name(which)); break; } return val; } /* * The largest MAD packet size. */ #define MAX_MAD_PACKET 2048 /* * Return the maximum header bytes that can go on the _wire_ * for this device. This count includes the ICRC which is * not part of the packet held in memory but it is appended * by the HW. * This is dependent on the device's receive header entry size. * HFI allows this to be set per-receive context, but the * driver presently enforces a global value. */ u32 lrh_max_header_bytes(struct hfi1_devdata *dd) { /* * The maximum non-payload (MTU) bytes in LRH.PktLen are * the Receive Header Entry Size minus the PBC (or RHF) size * plus one DW for the ICRC appended by HW. * * dd->rcd[0].rcvhdrqentsize is in DW. * We use rcd[0] as all context will have the same value. Also, * the first kernel context would have been allocated by now so * we are guaranteed a valid value. */ return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; } /* * Set Send Length * @ppd: per port data * * Set the MTU by limiting how many DWs may be sent. The SendLenCheck* * registers compare against LRH.PktLen, so use the max bytes included * in the LRH. * * This routine changes all VL values except VL15, which it maintains at * the same value. 
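* Illustrative arithmetic (example numbers, not taken from hardware documentation): with an 8192-byte MTU and max_hb of 128 bytes, the per-VL value written below is (8192 + 128) >> 2 = 2080 DWs before masking and shifting into the SEND_LEN_CHECK0/1 field for that VL.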
*/ static void set_send_length(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u32 max_hb = lrh_max_header_bytes(dd), dcmtu; u32 maxvlmtu = dd->vld[15].mtu; u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) & SEND_LEN_CHECK1_LEN_VL15_MASK) << SEND_LEN_CHECK1_LEN_VL15_SHIFT; int i, j; u32 thres; for (i = 0; i < ppd->vls_supported; i++) { if (dd->vld[i].mtu > maxvlmtu) maxvlmtu = dd->vld[i].mtu; if (i <= 3) len1 |= (((dd->vld[i].mtu + max_hb) >> 2) & SEND_LEN_CHECK0_LEN_VL0_MASK) << ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT); else len2 |= (((dd->vld[i].mtu + max_hb) >> 2) & SEND_LEN_CHECK1_LEN_VL4_MASK) << ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT); } write_csr(dd, SEND_LEN_CHECK0, len1); write_csr(dd, SEND_LEN_CHECK1, len2); /* adjust kernel credit return thresholds based on new MTUs */ /* all kernel receive contexts have the same hdrqentsize */ for (i = 0; i < ppd->vls_supported; i++) { thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu, get_hdrqentsize(dd->rcd[0]))); for (j = 0; j < INIT_SC_PER_VL; j++) sc_set_cr_threshold( pio_select_send_context_vl(dd, j, i), thres); } thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu, dd->rcd[0]->rcvhdrqentsize)); sc_set_cr_threshold(dd->vld[15].sc, thres); /* Adjust maximum MTU for the port in DC */ dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 : (ilog2(maxvlmtu >> 8) + 1); len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG); len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK; len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) << DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT; write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1); } static void set_lidlmc(struct hfi1_pportdata *ppd) { int i; u64 sreg = 0; struct hfi1_devdata *dd = ppd->dd; u32 mask = ~((1U << ppd->lmc) - 1); u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); u32 lid; /* * Program 0 in CSR if port lid is extended. This prevents * 9B packets being sent out for large lids. */ lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 
0 : ppd->lid; c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) | ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK) << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT); write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1); /* * Iterate over all the send contexts and set their SLID check */ sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) << SEND_CTXT_CHECK_SLID_MASK_SHIFT) | (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) << SEND_CTXT_CHECK_SLID_VALUE_SHIFT); for (i = 0; i < chip_send_contexts(dd); i++) { hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x", i, (u32)sreg); write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg); } /* Now we have to do the same thing for the sdma engines */ sdma_update_lmc(dd, mask, lid); } static const char *state_completed_string(u32 completed) { static const char * const state_completed[] = { "EstablishComm", "OptimizeEQ", "VerifyCap" }; if (completed < ARRAY_SIZE(state_completed)) return state_completed[completed]; return "unknown"; } static const char all_lanes_dead_timeout_expired[] = "All lanes were inactive – was the interconnect media removed?"; static const char tx_out_of_policy[] = "Passing lanes on local port do not meet the local link width policy"; static const char no_state_complete[] = "State timeout occurred before link partner completed the state"; static const char * const state_complete_reasons[] = { [0x00] = "Reason unknown", [0x01] = "Link was halted by driver, refer to LinkDownReason", [0x02] = "Link partner reported failure", [0x10] = "Unable to achieve frame sync on any lane", [0x11] = "Unable to find a common bit rate with the link partner", [0x12] = "Unable to achieve frame sync on sufficient lanes to meet the local link width policy", [0x13] = "Unable to identify preset equalization on sufficient lanes to meet the local link width policy", [0x14] = no_state_complete, [0x15] = "State timeout occurred before link partner identified equalization presets", [0x16] = "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy", [0x17] = tx_out_of_policy, [0x20] = all_lanes_dead_timeout_expired, [0x21] = "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy", [0x22] = no_state_complete, [0x23] = "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy", [0x24] = tx_out_of_policy, [0x30] = all_lanes_dead_timeout_expired, [0x31] = "State timeout occurred waiting for host to process received frames", [0x32] = no_state_complete, [0x33] = "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy", [0x34] = tx_out_of_policy, [0x35] = "Negotiated link width is mutually exclusive", [0x36] = "Timed out before receiving verifycap frames in VerifyCap.Exchange", [0x37] = "Unable to resolve secure data exchange", }; static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd, u32 code) { const char *str = NULL; if (code < ARRAY_SIZE(state_complete_reasons)) str = state_complete_reasons[code]; if (str) return str; return "Reserved"; } /* describe the given last state complete frame */ static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame, const char *prefix) { struct hfi1_devdata *dd = ppd->dd; u32 success; u32 state; u32 reason; u32 lanes; /* * Decode frame: * [ 0: 0] - success * [ 3: 1] - state * [ 7: 4] - next state 
timeout * [15: 8] - reason code * [31:16] - lanes */ success = frame & 0x1; state = (frame >> 1) & 0x7; reason = (frame >> 8) & 0xff; lanes = (frame >> 16) & 0xffff; dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n", prefix, frame); dd_dev_err(dd, " last reported state: %s (0x%x)\n", state_completed_string(state), state); dd_dev_err(dd, " state successfully completed: %s\n", success ? "yes" : "no"); dd_dev_err(dd, " fail reason 0x%x: %s\n", reason, state_complete_reason_code_string(ppd, reason)); dd_dev_err(dd, " passing lane mask: 0x%x", lanes); } /* * Read the last state complete frames and explain them. This routine * expects to be called if the link went down during link negotiation * and initialization (LNI). That is, anywhere between polling and link up. */ static void check_lni_states(struct hfi1_pportdata *ppd) { u32 last_local_state; u32 last_remote_state; read_last_local_state(ppd->dd, &last_local_state); read_last_remote_state(ppd->dd, &last_remote_state); /* * Don't report anything if there is nothing to report. A value of * 0 means the link was taken down while polling and there was no * training in-process. */ if (last_local_state == 0 && last_remote_state == 0) return; decode_state_complete(ppd, last_local_state, "transmitted"); decode_state_complete(ppd, last_remote_state, "received"); } /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */ static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms) { u64 reg; unsigned long timeout; /* watch LCB_STS_LINK_TRANSFER_ACTIVE */ timeout = jiffies + msecs_to_jiffies(wait_ms); while (1) { reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE); if (reg) break; if (time_after(jiffies, timeout)) { dd_dev_err(dd, "timeout waiting for LINK_TRANSFER_ACTIVE\n"); return -ETIMEDOUT; } udelay(2); } return 0; } /* called when the logical link state is not down as it should be */ static void force_logical_link_state_down(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; /* * Bring link up in LCB loopback */ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK); write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0); write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2); write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET); udelay(3); write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1); write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT); wait_link_transfer_active(dd, 100); /* * Bring the link down again. */ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0); write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0); dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n"); } /* * Helper for set_link_state(). Do not call except from that routine. * Expects ppd->hls_mutex to be held. * * @rem_reason value to be sent to the neighbor * * LinkDownReasons only set if transition succeeds.
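* Sequence performed below: mark HLS_GOING_OFFLINE, request PLS_OFFLINE from the 8051 (rem_reason in bits 15:8 of the request), wait for an offline substate, optionally turn off AOC transmitters, take LCB access back from the 8051, force the logical state down if needed, then sit in HLS_LINK_COOLDOWN until the firmware reports ready.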
*/ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) { struct hfi1_devdata *dd = ppd->dd; u32 previous_state; int offline_state_ret; int ret; update_lcb_cache(dd); previous_state = ppd->host_link_state; ppd->host_link_state = HLS_GOING_OFFLINE; /* start offline transition */ ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "Failed to transition to Offline link state, return %d\n", ret); return -EINVAL; } if (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); offline_state_ret = wait_phys_link_offline_substates(ppd, 10000); if (offline_state_ret < 0) return offline_state_ret; /* Disabling AOC transmitters */ if (ppd->port_type == PORT_TYPE_QSFP && ppd->qsfp_info.limiting_active && qsfp_mod_present(ppd)) { int ret; ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT); if (ret == 0) { set_qsfp_tx(ppd, 0); release_chip_resource(dd, qsfp_resource(dd)); } else { /* not fatal, but should warn */ dd_dev_err(dd, "Unable to acquire lock to turn off QSFP TX\n"); } } /* * Wait for the offline.Quiet transition if it hasn't happened yet. It * can take a while for the link to go down. */ if (offline_state_ret != PLS_OFFLINE_QUIET) { ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000); if (ret < 0) return ret; } /* * Now in charge of LCB - must be after the physical state is * offline.quiet and before host_link_state is changed. */ set_host_lcb_access(dd); write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ /* make sure the logical state is also down */ ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); if (ret) force_logical_link_state_down(ppd); ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ update_statusp(ppd, IB_PORT_DOWN); /* * The LNI has a mandatory wait time after the physical state * moves to Offline.Quiet. The wait time may be different * depending on how the link went down. The 8051 firmware * will observe the needed wait time and only move to ready * when that is completed. The largest of the quiet timeouts * is 6s, so wait that long and then at least 0.5s more for * other transitions, and another 0.5s for a buffer. */ ret = wait_fm_ready(dd, 7000); if (ret) { dd_dev_err(dd, "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n"); /* state is really offline, so make it so */ ppd->host_link_state = HLS_DN_OFFLINE; return ret; } /* * The state is now offline and the 8051 is ready to accept host * requests. 
* - change our state * - notify others if we were previously in a linkup state */ ppd->host_link_state = HLS_DN_OFFLINE; if (previous_state & HLS_UP) { /* went down while link was up */ handle_linkup_change(dd, 0); } else if (previous_state & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { /* went down while attempting link up */ check_lni_states(ppd); /* The QSFP doesn't need to be reset on LNI failure */ ppd->qsfp_info.reset_needed = 0; } /* the active link width (downgrade) is 0 on link down */ ppd->link_width_active = 0; ppd->link_width_downgrade_tx_active = 0; ppd->link_width_downgrade_rx_active = 0; ppd->current_egress_rate = 0; return 0; } /* return the link state name */ static const char *link_state_name(u32 state) { const char *name; int n = ilog2(state); static const char * const names[] = { [__HLS_UP_INIT_BP] = "INIT", [__HLS_UP_ARMED_BP] = "ARMED", [__HLS_UP_ACTIVE_BP] = "ACTIVE", [__HLS_DN_DOWNDEF_BP] = "DOWNDEF", [__HLS_DN_POLL_BP] = "POLL", [__HLS_DN_DISABLE_BP] = "DISABLE", [__HLS_DN_OFFLINE_BP] = "OFFLINE", [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP", [__HLS_GOING_UP_BP] = "GOING_UP", [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE", [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN" }; name = n < ARRAY_SIZE(names) ? names[n] : NULL; return name ? name : "unknown"; } /* return the link state reason name */ static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state) { if (state == HLS_UP_INIT) { switch (ppd->linkinit_reason) { case OPA_LINKINIT_REASON_LINKUP: return "(LINKUP)"; case OPA_LINKINIT_REASON_FLAPPING: return "(FLAPPING)"; case OPA_LINKINIT_OUTSIDE_POLICY: return "(OUTSIDE_POLICY)"; case OPA_LINKINIT_QUARANTINED: return "(QUARANTINED)"; case OPA_LINKINIT_INSUFIC_CAPABILITY: return "(INSUFIC_CAPABILITY)"; default: break; } } return ""; } /* * driver_pstate - convert the driver's notion of a port's * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*). * Return -1 (converted to a u32) to indicate error. */ u32 driver_pstate(struct hfi1_pportdata *ppd) { switch (ppd->host_link_state) { case HLS_UP_INIT: case HLS_UP_ARMED: case HLS_UP_ACTIVE: return IB_PORTPHYSSTATE_LINKUP; case HLS_DN_POLL: return IB_PORTPHYSSTATE_POLLING; case HLS_DN_DISABLE: return IB_PORTPHYSSTATE_DISABLED; case HLS_DN_OFFLINE: return OPA_PORTPHYSSTATE_OFFLINE; case HLS_VERIFY_CAP: return IB_PORTPHYSSTATE_TRAINING; case HLS_GOING_UP: return IB_PORTPHYSSTATE_TRAINING; case HLS_GOING_OFFLINE: return OPA_PORTPHYSSTATE_OFFLINE; case HLS_LINK_COOLDOWN: return OPA_PORTPHYSSTATE_OFFLINE; case HLS_DN_DOWNDEF: default: dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", ppd->host_link_state); return -1; } } /* * driver_lstate - convert the driver's notion of a port's * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1 * (converted to a u32) to indicate error. 
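* Mapping used below: any HLS_DOWN state reports IB_PORT_DOWN, and HLS_UP_INIT/ARMED/ACTIVE report IB_PORT_INIT/ARMED/ACTIVE respectively.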
*/ u32 driver_lstate(struct hfi1_pportdata *ppd) { if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN)) return IB_PORT_DOWN; switch (ppd->host_link_state & HLS_UP) { case HLS_UP_INIT: return IB_PORT_INIT; case HLS_UP_ARMED: return IB_PORT_ARMED; case HLS_UP_ACTIVE: return IB_PORT_ACTIVE; default: dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", ppd->host_link_state); return -1; } } void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason, u8 neigh_reason, u8 rem_reason) { if (ppd->local_link_down_reason.latest == 0 && ppd->neigh_link_down_reason.latest == 0) { ppd->local_link_down_reason.latest = lcl_reason; ppd->neigh_link_down_reason.latest = neigh_reason; ppd->remote_link_down_reason = rem_reason; } } /** * data_vls_operational() - Verify if data VL BCT credits and MTU * are both set. * @ppd: pointer to hfi1_pportdata structure * * Return: true - Ok, false - otherwise. */ static inline bool data_vls_operational(struct hfi1_pportdata *ppd) { int i; u64 reg; if (!ppd->actual_vls_operational) return false; for (i = 0; i < ppd->vls_supported; i++) { reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i)); if ((reg && !ppd->dd->vld[i].mtu) || (!reg && ppd->dd->vld[i].mtu)) return false; } return true; } /* * Change the physical and/or logical link state. * * Do not call this routine while inside an interrupt. It contains * calls to routines that can take multiple seconds to finish. * * Returns 0 on success, -errno on failure. */ int set_link_state(struct hfi1_pportdata *ppd, u32 state) { struct hfi1_devdata *dd = ppd->dd; struct ib_event event = {.device = NULL}; int ret1, ret = 0; int orig_new_state, poll_bounce; mutex_lock(&ppd->hls_lock); orig_new_state = state; if (state == HLS_DN_DOWNDEF) state = HLS_DEFAULT; /* interpret poll -> poll as a link bounce */ poll_bounce = ppd->host_link_state == HLS_DN_POLL && state == HLS_DN_POLL; dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__, link_state_name(ppd->host_link_state), link_state_name(orig_new_state), poll_bounce ? "(bounce) " : "", link_state_reason_name(ppd, state)); /* * If we're going to a (HLS_*) link state that implies the logical * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then * reset is_sm_config_started to 0. */ if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE))) ppd->is_sm_config_started = 0; /* * Do nothing if the states match. Let a poll to poll link bounce * go through. */ if (ppd->host_link_state == state && !poll_bounce) goto done; switch (state) { case HLS_UP_INIT: if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { /* * Quick link up jumps from polling to here. * * Whether in normal or loopback mode, the * simulator jumps from polling to link up. * Accept that here. */ /* OK */ } else if (ppd->host_link_state != HLS_GOING_UP) { goto unexpected; } /* * Wait for Link_Up physical state. * Physical and Logical states should already be * transitioned to LinkUp and LinkInit respectively.
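* Both waits below use a 1000 ms timeout; a failure of either leaves ret set and breaks out of the state switch without marking the port INIT.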
*/ ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000); if (ret) { dd_dev_err(dd, "%s: physical state did not change to LINK-UP\n", __func__); break; } ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000); if (ret) { dd_dev_err(dd, "%s: logical state did not change to INIT\n", __func__); break; } /* clear old transient LINKINIT_REASON code */ if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR) ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP; /* enable the port */ add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); handle_linkup_change(dd, 1); pio_kernel_linkup(dd); /* * After link up, a new link width will have been set. * Update the xmit counters with regard to the new * link width. */ update_xmit_counters(ppd, ppd->link_width_active); ppd->host_link_state = HLS_UP_INIT; update_statusp(ppd, IB_PORT_INIT); break; case HLS_UP_ARMED: if (ppd->host_link_state != HLS_UP_INIT) goto unexpected; if (!data_vls_operational(ppd)) { dd_dev_err(dd, "%s: Invalid data VL credits or mtu\n", __func__); ret = -EINVAL; break; } set_logical_state(dd, LSTATE_ARMED); ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000); if (ret) { dd_dev_err(dd, "%s: logical state did not change to ARMED\n", __func__); break; } ppd->host_link_state = HLS_UP_ARMED; update_statusp(ppd, IB_PORT_ARMED); /* * The simulator does not currently implement SMA messages, * so neighbor_normal is not set. Set it here when we first * move to Armed. */ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) ppd->neighbor_normal = 1; break; case HLS_UP_ACTIVE: if (ppd->host_link_state != HLS_UP_ARMED) goto unexpected; set_logical_state(dd, LSTATE_ACTIVE); ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000); if (ret) { dd_dev_err(dd, "%s: logical state did not change to ACTIVE\n", __func__); } else { /* tell all engines to go running */ sdma_all_running(dd); ppd->host_link_state = HLS_UP_ACTIVE; update_statusp(ppd, IB_PORT_ACTIVE); /* Signal the IB layer that the port has gone active */ event.device = &dd->verbs_dev.rdi.ibdev; event.element.port_num = ppd->port; event.event = IB_EVENT_PORT_ACTIVE; } break; case HLS_DN_POLL: if ((ppd->host_link_state == HLS_DN_DISABLE || ppd->host_link_state == HLS_DN_OFFLINE) && dd->dc_shutdown) dc_start(dd); /* Hand LED control to the DC */ write_csr(dd, DCC_CFG_LED_CNTRL, 0); if (ppd->host_link_state != HLS_DN_OFFLINE) { u8 tmp = ppd->link_enabled; ret = goto_offline(ppd, ppd->remote_link_down_reason); if (ret) { ppd->link_enabled = tmp; break; } ppd->remote_link_down_reason = 0; if (ppd->driver_link_ready) ppd->link_enabled = 1; } set_all_slowpath(ppd->dd); ret = set_local_link_attributes(ppd); if (ret) break; ppd->port_error_action = 0; if (quick_linkup) { /* quick linkup does not go into polling */ ret = do_quick_linkup(dd); } else { ret1 = set_physical_link_state(dd, PLS_POLLING); if (!ret1) ret1 = wait_phys_link_out_of_offline(ppd, 3000); if (ret1 != HCMD_SUCCESS) { dd_dev_err(dd, "Failed to transition to Polling link state, return 0x%x\n", ret1); ret = -EINVAL; } } /* * Change the host link state after requesting DC8051 to * change its physical state so that we can ignore any * interrupt with stale LNI(XX) error, which will not be * cleared until DC8051 transitions to Polling state. */ ppd->host_link_state = HLS_DN_POLL; ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE); /* * If an error occurred above, go back to offline. The * caller may reschedule another attempt.
*/ if (ret) goto_offline(ppd, 0); else log_physical_state(ppd, PLS_POLLING); break; case HLS_DN_DISABLE: /* link is disabled */ ppd->link_enabled = 0; /* allow any state to transition to disabled */ /* must transition to offline first */ if (ppd->host_link_state != HLS_DN_OFFLINE) { ret = goto_offline(ppd, ppd->remote_link_down_reason); if (ret) break; ppd->remote_link_down_reason = 0; } if (!dd->dc_shutdown) { ret1 = set_physical_link_state(dd, PLS_DISABLED); if (ret1 != HCMD_SUCCESS) { dd_dev_err(dd, "Failed to transition to Disabled link state, return 0x%x\n", ret1); ret = -EINVAL; break; } ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000); if (ret) { dd_dev_err(dd, "%s: physical state did not change to DISABLED\n", __func__); break; } dc_shutdown(dd); } ppd->host_link_state = HLS_DN_DISABLE; break; case HLS_DN_OFFLINE: if (ppd->host_link_state == HLS_DN_DISABLE) dc_start(dd); /* allow any state to transition to offline */ ret = goto_offline(ppd, ppd->remote_link_down_reason); if (!ret) ppd->remote_link_down_reason = 0; break; case HLS_VERIFY_CAP: if (ppd->host_link_state != HLS_DN_POLL) goto unexpected; ppd->host_link_state = HLS_VERIFY_CAP; log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP); break; case HLS_GOING_UP: if (ppd->host_link_state != HLS_VERIFY_CAP) goto unexpected; ret1 = set_physical_link_state(dd, PLS_LINKUP); if (ret1 != HCMD_SUCCESS) { dd_dev_err(dd, "Failed to transition to link up state, return 0x%x\n", ret1); ret = -EINVAL; break; } ppd->host_link_state = HLS_GOING_UP; break; case HLS_GOING_OFFLINE: /* transient within goto_offline() */ case HLS_LINK_COOLDOWN: /* transient within goto_offline() */ default: dd_dev_info(dd, "%s: state 0x%x: not supported\n", __func__, state); ret = -EINVAL; break; } goto done; unexpected: dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n", __func__, link_state_name(ppd->host_link_state), link_state_name(state)); ret = -EINVAL; done: mutex_unlock(&ppd->hls_lock); if (event.device) ib_dispatch_event(&event); return ret; } int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val) { u64 reg; int ret = 0; switch (which) { case HFI1_IB_CFG_LIDLMC: set_lidlmc(ppd); break; case HFI1_IB_CFG_VL_HIGH_LIMIT: /* * The VL Arbitrator high limit is sent in units of 4k * bytes, while HFI stores it in units of 64 bytes. */ val *= 4096 / 64; reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK) << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT; write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg); break; case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ /* HFI only supports POLL as the default link down state */ if (val != HLS_DN_POLL) ret = -EINVAL; break; case HFI1_IB_CFG_OP_VLS: if (ppd->vls_operational != val) { ppd->vls_operational = val; if (!ppd->port) ret = -EINVAL; } break; /* * For link width, link width downgrade, and speed enable, always AND * the setting with what is actually supported. This has two benefits. * First, enabled can't have unsupported values, no matter what the * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean * "fill in with your supported value" have all the bits in the * field set, so simply ANDing with supported has the desired result. 
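* Illustrative example (made-up values): if link_width_supported is 0x3 and the FM writes an all-ones wildcard of 0xffff, the stored enabled value becomes 0xffff & 0x3 = 0x3.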
*/ case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */ ppd->link_width_enabled = val & ppd->link_width_supported; break; case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */ ppd->link_width_downgrade_enabled = val & ppd->link_width_downgrade_supported; break; case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */ ppd->link_speed_enabled = val & ppd->link_speed_supported; break; case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ /* * HFI does not follow IB specs, save this value * so we can report it, if asked. */ ppd->overrun_threshold = val; break; case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ /* * HFI does not follow IB specs, save this value * so we can report it, if asked. */ ppd->phy_error_threshold = val; break; case HFI1_IB_CFG_MTU: set_send_length(ppd); break; case HFI1_IB_CFG_PKEYS: if (HFI1_CAP_IS_KSET(PKEY_CHECK)) set_partition_keys(ppd); break; default: if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) dd_dev_info(ppd->dd, "%s: which %s, val 0x%x: not implemented\n", __func__, ib_cfg_name(which), val); break; } return ret; } /* begin functions related to vl arbitration table caching */ static void init_vl_arb_caches(struct hfi1_pportdata *ppd) { int i; BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_LOW_PRIO_TABLE_SIZE); BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_HIGH_PRIO_TABLE_SIZE); /* * Note that we always return values directly from the * 'vl_arb_cache' (and do no CSR reads) in response to a * 'Get(VLArbTable)'. This is obviously correct after a * 'Set(VLArbTable)', since the cache will then be up to * date. But it's also correct prior to any 'Set(VLArbTable)' * since then both the cache, and the relevant h/w registers * will be zeroed. */ for (i = 0; i < MAX_PRIO_TABLE; i++) spin_lock_init(&ppd->vl_arb_cache[i].lock); } /* * vl_arb_lock_cache * * All other vl_arb_* functions should be called only after locking * the cache. */ static inline struct vl_arb_cache * vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx) { if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE) return NULL; spin_lock(&ppd->vl_arb_cache[idx].lock); return &ppd->vl_arb_cache[idx]; } static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx) { spin_unlock(&ppd->vl_arb_cache[idx].lock); } static void vl_arb_get_cache(struct vl_arb_cache *cache, struct ib_vl_weight_elem *vl) { memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl)); } static void vl_arb_set_cache(struct vl_arb_cache *cache, struct ib_vl_weight_elem *vl) { memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); } static int vl_arb_match_cache(struct vl_arb_cache *cache, struct ib_vl_weight_elem *vl) { return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); } /* end functions related to vl arbitration table caching */ static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target, u32 size, struct ib_vl_weight_elem *vl) { struct hfi1_devdata *dd = ppd->dd; u64 reg; unsigned int i, is_up = 0; int drain, ret = 0; mutex_lock(&ppd->hls_lock); if (ppd->host_link_state & HLS_UP) is_up = 1; drain = !is_ax(dd) && is_up; if (drain) /* * Before adjusting VL arbitration weights, empty per-VL * FIFOs, otherwise a packet whose VL weight is being * set to 0 could get stuck in a FIFO with no chance to * egress. 
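* As set up just above, the drain is only attempted when the link is up and is_ax() is false; otherwise the weight registers below are written without stopping traffic first.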
*/ ret = stop_drain_data_vls(dd); if (ret) { dd_dev_err( dd, "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n", __func__); goto err; } for (i = 0; i < size; i++, vl++) { /* * NOTE: The low priority shift and mask are used here, but * they are the same for both the low and high registers. */ reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK) << SEND_LOW_PRIORITY_LIST_VL_SHIFT) | (((u64)vl->weight & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK) << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT); write_csr(dd, target + (i * 8), reg); } pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE); if (drain) open_fill_data_vls(dd); /* reopen all VLs */ err: mutex_unlock(&ppd->hls_lock); return ret; } /* * Read one credit merge VL register. */ static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr, struct vl_limit *vll) { u64 reg = read_csr(dd, csr); vll->dedicated = cpu_to_be16( (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT) & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK); vll->shared = cpu_to_be16( (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT) & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK); } /* * Read the current credit merge limits. */ static int get_buffer_control(struct hfi1_devdata *dd, struct buffer_control *bc, u16 *overall_limit) { u64 reg; int i; /* not all entries are filled in */ memset(bc, 0, sizeof(*bc)); /* OPA and HFI have a 1-1 mapping */ for (i = 0; i < TXE_NUM_DATA_VL; i++) read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]); /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */ read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]); reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); bc->overall_shared_limit = cpu_to_be16( (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK); if (overall_limit) *overall_limit = (reg >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK; return sizeof(struct buffer_control); } static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) { u64 reg; int i; /* each register contains 16 SC->VLnt mappings, 4 bits each */ reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0); for (i = 0; i < sizeof(u64); i++) { u8 byte = *(((u8 *)&reg) + i); dp->vlnt[2 * i] = byte & 0xf; dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4; } reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16); for (i = 0; i < sizeof(u64); i++) { u8 byte = *(((u8 *)&reg) + i); dp->vlnt[16 + (2 * i)] = byte & 0xf; dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4; } return sizeof(struct sc2vlnt); } static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems, struct ib_vl_weight_elem *vl) { unsigned int i; for (i = 0; i < nelems; i++, vl++) { vl->vl = 0xf; vl->weight = 0; } } static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) { write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(15_0, 0, dp->vlnt[0] & 0xf, 1, dp->vlnt[1] & 0xf, 2, dp->vlnt[2] & 0xf, 3, dp->vlnt[3] & 0xf, 4, dp->vlnt[4] & 0xf, 5, dp->vlnt[5] & 0xf, 6, dp->vlnt[6] & 0xf, 7, dp->vlnt[7] & 0xf, 8, dp->vlnt[8] & 0xf, 9, dp->vlnt[9] & 0xf, 10, dp->vlnt[10] & 0xf, 11, dp->vlnt[11] & 0xf, 12, dp->vlnt[12] & 0xf, 13, dp->vlnt[13] & 0xf, 14, dp->vlnt[14] & 0xf, 15, dp->vlnt[15] & 0xf)); write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(31_16, 16, dp->vlnt[16] & 0xf, 17, dp->vlnt[17] & 0xf, 18, dp->vlnt[18] & 0xf, 19, dp->vlnt[19] & 0xf, 20, dp->vlnt[20] & 0xf, 21, dp->vlnt[21] & 0xf, 22, dp->vlnt[22] & 0xf, 23, dp->vlnt[23] & 0xf, 24, dp->vlnt[24] & 0xf, 25, dp->vlnt[25] & 0xf, 26, dp->vlnt[26] & 0xf, 27, dp->vlnt[27] & 0xf, 28, 
dp->vlnt[28] & 0xf, 29, dp->vlnt[29] & 0xf, 30, dp->vlnt[30] & 0xf, 31, dp->vlnt[31] & 0xf)); } static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what, u16 limit) { if (limit != 0) dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n", what, (int)limit, idx); } /* change only the shared limit portion of SendCmGLobalCredit */ static void set_global_shared(struct hfi1_devdata *dd, u16 limit) { u64 reg; reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK; reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT; write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); } /* change only the total credit limit portion of SendCmGLobalCredit */ static void set_global_limit(struct hfi1_devdata *dd, u16 limit) { u64 reg; reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK; reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); } /* set the given per-VL shared limit */ static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit) { u64 reg; u32 addr; if (vl < TXE_NUM_DATA_VL) addr = SEND_CM_CREDIT_VL + (8 * vl); else addr = SEND_CM_CREDIT_VL15; reg = read_csr(dd, addr); reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK; reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT; write_csr(dd, addr, reg); } /* set the given per-VL dedicated limit */ static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit) { u64 reg; u32 addr; if (vl < TXE_NUM_DATA_VL) addr = SEND_CM_CREDIT_VL + (8 * vl); else addr = SEND_CM_CREDIT_VL15; reg = read_csr(dd, addr); reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK; reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT; write_csr(dd, addr, reg); } /* spin until the given per-VL status mask bits clear */ static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask, const char *which) { unsigned long timeout; u64 reg; timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT); while (1) { reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask; if (reg == 0) return; /* success */ if (time_after(jiffies, timeout)) break; /* timed out */ udelay(1); } dd_dev_err(dd, "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n", which, VL_STATUS_CLEAR_TIMEOUT, mask, reg); /* * If this occurs, it is likely there was a credit loss on the link. * The only recovery from that is a link bounce. */ dd_dev_err(dd, "Continuing anyway. A credit loss may occur. Suggest a link bounce\n"); } /* * The number of credits on the VLs may be changed while everything * is "live", but the following algorithm must be followed due to * how the hardware is actually implemented. In particular, * Return_Credit_Status[] is the only correct status check. * * if (reducing Global_Shared_Credit_Limit or any shared limit changing) * set Global_Shared_Credit_Limit = 0 * use_all_vl = 1 * mask0 = all VLs that are changing either dedicated or shared limits * set Shared_Limit[mask0] = 0 * spin until Return_Credit_Status[use_all_vl ? 
all VL : mask0] == 0 * if (changing any dedicated limit) * mask1 = all VLs that are lowering dedicated limits * lower Dedicated_Limit[mask1] * spin until Return_Credit_Status[mask1] == 0 * raise Dedicated_Limits * raise Shared_Limits * raise Global_Shared_Credit_Limit * * lower = if the new limit is lower, set the limit to the new value * raise = if the new limit is higher than the current value (may be changed * earlier in the algorithm), set the new limit to the new value */ int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *new_bc) { struct hfi1_devdata *dd = ppd->dd; u64 changing_mask, ld_mask, stat_mask; int change_count; int i, use_all_mask; int this_shared_changing; int vl_count = 0, ret; /* * A0: add the variable any_shared_limit_changing below and in the * algorithm above. If removing A0 support, it can be removed. */ int any_shared_limit_changing; struct buffer_control cur_bc; u8 changing[OPA_MAX_VLS]; u8 lowering_dedicated[OPA_MAX_VLS]; u16 cur_total; u32 new_total = 0; const u64 all_mask = SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK; #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15) #define NUM_USABLE_VLS 16 /* look at VL15 and less */ /* find the new total credits, do sanity check on unused VLs */ for (i = 0; i < OPA_MAX_VLS; i++) { if (valid_vl(i)) { new_total += be16_to_cpu(new_bc->vl[i].dedicated); continue; } nonzero_msg(dd, i, "dedicated", be16_to_cpu(new_bc->vl[i].dedicated)); nonzero_msg(dd, i, "shared", be16_to_cpu(new_bc->vl[i].shared)); new_bc->vl[i].dedicated = 0; new_bc->vl[i].shared = 0; } new_total += be16_to_cpu(new_bc->overall_shared_limit); /* fetch the current values */ get_buffer_control(dd, &cur_bc, &cur_total); /* * Create the masks we will use. */ memset(changing, 0, sizeof(changing)); memset(lowering_dedicated, 0, sizeof(lowering_dedicated)); /* * NOTE: Assumes that the individual VL bits are adjacent and in * increasing order */ stat_mask = SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK; changing_mask = 0; ld_mask = 0; change_count = 0; any_shared_limit_changing = 0; for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) { if (!valid_vl(i)) continue; this_shared_changing = new_bc->vl[i].shared != cur_bc.vl[i].shared; if (this_shared_changing) any_shared_limit_changing = 1; if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated || this_shared_changing) { changing[i] = 1; changing_mask |= stat_mask; change_count++; } if (be16_to_cpu(new_bc->vl[i].dedicated) < be16_to_cpu(cur_bc.vl[i].dedicated)) { lowering_dedicated[i] = 1; ld_mask |= stat_mask; } } /* bracket the credit change with a total adjustment */ if (new_total > cur_total) set_global_limit(dd, new_total); /* * Start the credit change algorithm. 
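 * (Outline of the code that follows: zero the global and per-VL shared
 * limits that are changing, wait for Return_Credit_Status to clear,
 * lower any dedicated limits that are decreasing and wait again, then
 * raise dedicated, shared, and finally the global shared limit.)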
*/ use_all_mask = 0; if ((be16_to_cpu(new_bc->overall_shared_limit) < be16_to_cpu(cur_bc.overall_shared_limit)) || (is_ax(dd) && any_shared_limit_changing)) { set_global_shared(dd, 0); cur_bc.overall_shared_limit = 0; use_all_mask = 1; } for (i = 0; i < NUM_USABLE_VLS; i++) { if (!valid_vl(i)) continue; if (changing[i]) { set_vl_shared(dd, i, 0); cur_bc.vl[i].shared = 0; } } wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask, "shared"); if (change_count > 0) { for (i = 0; i < NUM_USABLE_VLS; i++) { if (!valid_vl(i)) continue; if (lowering_dedicated[i]) { set_vl_dedicated(dd, i, be16_to_cpu(new_bc-> vl[i].dedicated)); cur_bc.vl[i].dedicated = new_bc->vl[i].dedicated; } } wait_for_vl_status_clear(dd, ld_mask, "dedicated"); /* now raise all dedicated that are going up */ for (i = 0; i < NUM_USABLE_VLS; i++) { if (!valid_vl(i)) continue; if (be16_to_cpu(new_bc->vl[i].dedicated) > be16_to_cpu(cur_bc.vl[i].dedicated)) set_vl_dedicated(dd, i, be16_to_cpu(new_bc-> vl[i].dedicated)); } } /* next raise all shared that are going up */ for (i = 0; i < NUM_USABLE_VLS; i++) { if (!valid_vl(i)) continue; if (be16_to_cpu(new_bc->vl[i].shared) > be16_to_cpu(cur_bc.vl[i].shared)) set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared)); } /* finally raise the global shared */ if (be16_to_cpu(new_bc->overall_shared_limit) > be16_to_cpu(cur_bc.overall_shared_limit)) set_global_shared(dd, be16_to_cpu(new_bc->overall_shared_limit)); /* bracket the credit change with a total adjustment */ if (new_total < cur_total) set_global_limit(dd, new_total); /* * Determine the actual number of operational VLS using the number of * dedicated and shared credits for each VL. */ if (change_count > 0) { for (i = 0; i < TXE_NUM_DATA_VL; i++) if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 || be16_to_cpu(new_bc->vl[i].shared) > 0) vl_count++; ppd->actual_vls_operational = vl_count; ret = sdma_map_init(dd, ppd->port - 1, vl_count ? ppd->actual_vls_operational : ppd->vls_operational, NULL); if (ret == 0) ret = pio_map_init(dd, ppd->port - 1, vl_count ? ppd->actual_vls_operational : ppd->vls_operational, NULL); if (ret) return ret; } return 0; } /* * Read the given fabric manager table. Return the size of the * table (in bytes) on success, and a negative error code on * failure. */ int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t) { int size; struct vl_arb_cache *vlc; switch (which) { case FM_TBL_VL_HIGH_ARB: size = 256; /* * OPA specifies 128 elements (of 2 bytes each), though * HFI supports only 16 elements in h/w. */ vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE); vl_arb_get_cache(vlc, t); vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); break; case FM_TBL_VL_LOW_ARB: size = 256; /* * OPA specifies 128 elements (of 2 bytes each), though * HFI supports only 16 elements in h/w. */ vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE); vl_arb_get_cache(vlc, t); vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); break; case FM_TBL_BUFFER_CONTROL: size = get_buffer_control(ppd->dd, t, NULL); break; case FM_TBL_SC2VLNT: size = get_sc2vlnt(ppd->dd, t); break; case FM_TBL_VL_PREEMPT_ELEMS: size = 256; /* OPA specifies 128 elements, of 2 bytes each */ get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t); break; case FM_TBL_VL_PREEMPT_MATRIX: size = 256; /* * OPA specifies that this is the same size as the VL * arbitration tables (i.e., 256 bytes). */ break; default: return -EINVAL; } return size; } /* * Write the given fabric manager table. 
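 * For the two VL arbitration tables the cached copy is checked first;
 * a Set() whose contents match the cache is treated as a no-op and the
 * hardware is not touched.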
*/ int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t) { int ret = 0; struct vl_arb_cache *vlc; switch (which) { case FM_TBL_VL_HIGH_ARB: vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE); if (vl_arb_match_cache(vlc, t)) { vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); break; } vl_arb_set_cache(vlc, t); vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST, VL_ARB_HIGH_PRIO_TABLE_SIZE, t); break; case FM_TBL_VL_LOW_ARB: vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE); if (vl_arb_match_cache(vlc, t)) { vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); break; } vl_arb_set_cache(vlc, t); vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST, VL_ARB_LOW_PRIO_TABLE_SIZE, t); break; case FM_TBL_BUFFER_CONTROL: ret = set_buffer_control(ppd, t); break; case FM_TBL_SC2VLNT: set_sc2vlnt(ppd->dd, t); break; default: ret = -EINVAL; } return ret; } /* * Disable all data VLs. * * Return 0 if disabled, non-zero if the VLs cannot be disabled. */ static int disable_data_vls(struct hfi1_devdata *dd) { if (is_ax(dd)) return 1; pio_send_control(dd, PSC_DATA_VL_DISABLE); return 0; } /* * open_fill_data_vls() - the counterpart to stop_drain_data_vls(). * Just re-enables all data VLs (the "fill" part happens * automatically - the name was chosen for symmetry with * stop_drain_data_vls()). * * Return 0 if successful, non-zero if the VLs cannot be enabled. */ int open_fill_data_vls(struct hfi1_devdata *dd) { if (is_ax(dd)) return 1; pio_send_control(dd, PSC_DATA_VL_ENABLE); return 0; } /* * drain_data_vls() - assumes that disable_data_vls() has been called, * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA * engines to drop to 0. */ static void drain_data_vls(struct hfi1_devdata *dd) { sc_wait(dd); sdma_wait(dd); pause_for_credit_return(dd); } /* * stop_drain_data_vls() - disable, then drain all per-VL fifos. * * Use open_fill_data_vls() to resume using data VLs. This pair is * meant to be used like this: * * stop_drain_data_vls(dd); * // do things with per-VL resources * open_fill_data_vls(dd); */ int stop_drain_data_vls(struct hfi1_devdata *dd) { int ret; ret = disable_data_vls(dd); if (ret == 0) drain_data_vls(dd); return ret; } /* * Convert a nanosecond time to a cclock count. No matter how slow * the cclock, a non-zero ns will always have a non-zero result. */ u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns) { u32 cclocks; if (dd->icode == ICODE_FPGA_EMULATION) cclocks = (ns * 1000) / FPGA_CCLOCK_PS; else /* simulation pretends to be ASIC */ cclocks = (ns * 1000) / ASIC_CCLOCK_PS; if (ns && !cclocks) /* if ns nonzero, must be at least 1 */ cclocks = 1; return cclocks; } /* * Convert a cclock count to nanoseconds. Not matter how slow * the cclock, a non-zero cclocks will always have a non-zero result. */ u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks) { u32 ns; if (dd->icode == ICODE_FPGA_EMULATION) ns = (cclocks * FPGA_CCLOCK_PS) / 1000; else /* simulation pretends to be ASIC */ ns = (cclocks * ASIC_CCLOCK_PS) / 1000; if (cclocks && !ns) ns = 1; return ns; } /* * Dynamically adjust the receive interrupt timeout for a context based on * incoming packet rate. * * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero. 
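 * Worked example with hypothetical values: with rcv_intr_count == 16
 * and a current timeout of 8, an interrupt that observed only 5 packets
 * halves the timeout to 4, while one that observed 16 or more doubles
 * it to 16, clamped to rcv_intr_timeout_csr and never reduced below 1.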
*/ static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts) { struct hfi1_devdata *dd = rcd->dd; u32 timeout = rcd->rcvavail_timeout; /* * This algorithm doubles or halves the timeout depending on whether * the number of packets received in this interrupt were less than or * greater equal the interrupt count. * * The calculations below do not allow a steady state to be achieved. * Only at the endpoints it is possible to have an unchanging * timeout. */ if (npkts < rcv_intr_count) { /* * Not enough packets arrived before the timeout, adjust * timeout downward. */ if (timeout < 2) /* already at minimum? */ return; timeout >>= 1; } else { /* * More than enough packets arrived before the timeout, adjust * timeout upward. */ if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */ return; timeout = min(timeout << 1, dd->rcv_intr_timeout_csr); } rcd->rcvavail_timeout = timeout; /* * timeout cannot be larger than rcv_intr_timeout_csr which has already * been verified to be in range */ write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); } void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd, u32 intr_adjust, u32 npkts) { struct hfi1_devdata *dd = rcd->dd; u64 reg; u32 ctxt = rcd->ctxt; /* * Need to write timeout register before updating RcvHdrHead to ensure * that a new value is used when the HW decides to restart counting. */ if (intr_adjust) adjust_rcv_timeout(rcd, npkts); if (updegr) { reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK) << RCV_EGR_INDEX_HEAD_HEAD_SHIFT; write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg); } reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) | (((u64)hd & RCV_HDR_HEAD_HEAD_MASK) << RCV_HDR_HEAD_HEAD_SHIFT); write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); } u32 hdrqempty(struct hfi1_ctxtdata *rcd) { u32 head, tail; head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT; if (hfi1_rcvhdrtail_kvaddr(rcd)) tail = get_rcvhdrtail(rcd); else tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); return head == tail; } /* * Context Control and Receive Array encoding for buffer size: * 0x0 invalid * 0x1 4 KB * 0x2 8 KB * 0x3 16 KB * 0x4 32 KB * 0x5 64 KB * 0x6 128 KB * 0x7 256 KB * 0x8 512 KB (Receive Array only) * 0x9 1 MB (Receive Array only) * 0xa 2 MB (Receive Array only) * * 0xB-0xF - reserved (Receive Array only) * * * This routine assumes that the value has already been sanity checked. */ static u32 encoded_size(u32 size) { switch (size) { case 4 * 1024: return 0x1; case 8 * 1024: return 0x2; case 16 * 1024: return 0x3; case 32 * 1024: return 0x4; case 64 * 1024: return 0x5; case 128 * 1024: return 0x6; case 256 * 1024: return 0x7; case 512 * 1024: return 0x8; case 1 * 1024 * 1024: return 0x9; case 2 * 1024 * 1024: return 0xa; } return 0x1; /* if invalid, go with the minimum size */ } /** * encode_rcv_header_entry_size - return chip specific encoding for size * @size: size in dwords * * Convert a receive header entry size that to the encoding used in the CSR. * * Return a zero if the given size is invalid, otherwise the encoding. 
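 * The valid entry sizes (in dwords) map as: 2 -> 1, 16 -> 2, 32 -> 4;
 * any other value returns 0.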
*/ u8 encode_rcv_header_entry_size(u8 size) { /* there are only 3 valid receive header entry sizes */ if (size == 2) return 1; if (size == 16) return 2; if (size == 32) return 4; return 0; /* invalid */ } /** * hfi1_validate_rcvhdrcnt - validate hdrcnt * @dd: the device data * @thecnt: the header count */ int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) { if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { dd_dev_err(dd, "Receive header queue count too small\n"); return -EINVAL; } if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { dd_dev_err(dd, "Receive header queue count cannot be greater than %u\n", HFI1_MAX_HDRQ_EGRBUF_CNT); return -EINVAL; } if (thecnt % HDRQ_INCREMENT) { dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n", thecnt, HDRQ_INCREMENT); return -EINVAL; } return 0; } /** * set_hdrq_regs - set header queue registers for context * @dd: the device data * @ctxt: the context * @entsize: the dword entry size * @hdrcnt: the number of header entries */ void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt) { u64 reg; reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) << RCV_HDR_CNT_CNT_SHIFT; write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg); reg = ((u64)encode_rcv_header_entry_size(entsize) & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg); reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) << RCV_HDR_SIZE_HDR_SIZE_SHIFT; write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg); /* * Program dummy tail address for every receive context * before enabling any receive context */ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, dd->rcvhdrtail_dummy_dma); } void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, struct hfi1_ctxtdata *rcd) { u64 rcvctrl, reg; int did_enable = 0; u16 ctxt; if (!rcd) return; ctxt = rcd->ctxt; hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op); rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL); /* if the context already enabled, don't do the extra steps */ if ((op & HFI1_RCVCTRL_CTXT_ENB) && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) { /* reset the tail and hdr addresses, and sequence count */ write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, rcd->rcvhdrq_dma); if (hfi1_rcvhdrtail_kvaddr(rcd)) write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, rcd->rcvhdrqtailaddr_dma); hfi1_set_seq_cnt(rcd, 1); /* reset the cached receive header queue head value */ hfi1_set_rcd_head(rcd, 0); /* * Zero the receive header queue so we don't get false * positives when checking the sequence number. The * sequence numbers could land exactly on the same spot. * E.g. a rcd restart before the receive header wrapped. 
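 * Without the zeroing, stale entries from before the restart could
 * carry sequence numbers that happen to match what the context now
 * expects, making old headers look like freshly arrived ones.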
*/ memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd)); /* starting timeout */ rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr; /* enable the context */ rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK; /* clean the egr buffer size first */ rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK; rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size) & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK) << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT; /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0); did_enable = 1; /* zero RcvEgrIndexHead */ write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0); /* set eager count and base index */ reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT) & RCV_EGR_CTRL_EGR_CNT_MASK) << RCV_EGR_CTRL_EGR_CNT_SHIFT) | (((rcd->eager_base >> RCV_SHIFT) & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK) << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT); write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg); /* * Set TID (expected) count and base index. * rcd->expected_count is set to individual RcvArray entries, * not pairs, and the CSR takes a pair-count in groups of * four, so divide by 8. */ reg = (((rcd->expected_count >> RCV_SHIFT) & RCV_TID_CTRL_TID_PAIR_CNT_MASK) << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) | (((rcd->expected_base >> RCV_SHIFT) & RCV_TID_CTRL_TID_BASE_INDEX_MASK) << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT); write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg); if (ctxt == HFI1_CTRL_CTXT) write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT); } if (op & HFI1_RCVCTRL_CTXT_DIS) { write_csr(dd, RCV_VL15, 0); /* * When receive context is being disabled turn on tail * update with a dummy tail address and then disable * receive context. */ if (dd->rcvhdrtail_dummy_dma) { write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, dd->rcvhdrtail_dummy_dma); /* Enabling RcvCtxtCtrl.TailUpd is intentional. */ rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; } rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK; } if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) { set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, IS_RCVAVAIL_START + rcd->ctxt, true); rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK; } if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) { set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, IS_RCVAVAIL_START + rcd->ctxt, false); rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; } if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd)) rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; if (op & HFI1_RCVCTRL_TAILUPD_DIS) { /* See comment on RcvCtxtCtrl.TailUpd above */ if (!(op & HFI1_RCVCTRL_CTXT_DIS)) rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK; } if (op & HFI1_RCVCTRL_TIDFLOW_ENB) rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; if (op & HFI1_RCVCTRL_TIDFLOW_DIS) rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) { /* * In one-packet-per-eager mode, the size comes from * the RcvArray entry. 
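 * The context-wide EgrBufSize field is therefore cleared just below,
 * before the per-buffer mode bit is set.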
*/ rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK; rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK; } if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS) rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK; if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB) rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK; if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS) rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK; if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB) rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS) rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; if (op & HFI1_RCVCTRL_URGENT_ENB) set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, IS_RCVURGENT_START + rcd->ctxt, true); if (op & HFI1_RCVCTRL_URGENT_DIS) set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, IS_RCVURGENT_START + rcd->ctxt, false); hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx", ctxt, rcvctrl); write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl); /* work around sticky RcvCtxtStatus.BlockedRHQFull */ if (did_enable && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) { reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); if (reg != 0) { dd_dev_info(dd, "ctxt %d status %lld (blocked)\n", ctxt, reg); read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10); write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00); read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n", ctxt, reg, reg == 0 ? "not" : "still"); } } if (did_enable) { /* * The interrupt timeout and count must be set after * the context is enabled to take effect. */ /* set interrupt timeout */ write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT, (u64)rcd->rcvavail_timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */ reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT; write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); } if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS)) /* * If the context has been disabled and the Tail Update has * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address * so it doesn't contain an address that is invalid. */ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, dd->rcvhdrtail_dummy_dma); } u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp) { int ret; u64 val = 0; if (namep) { ret = dd->cntrnameslen; *namep = dd->cntrnames; } else { const struct cntr_entry *entry; int i, j; ret = (dd->ndevcntrs) * sizeof(u64); /* Get the start of the block of counters */ *cntrp = dd->cntrs; /* * Now go and fill in each counter in the block. 
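 * Block layout: a per-VL counter fills C_VL_COUNT consecutive u64
 * slots starting at entry->offset, a per-SDMA-engine counter one slot
 * per engine, and every other counter a single slot.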
*/ for (i = 0; i < DEV_CNTR_LAST; i++) { entry = &dev_cntrs[i]; hfi1_cdbg(CNTR, "reading %s", entry->name); if (entry->flags & CNTR_DISABLED) { /* Nothing */ hfi1_cdbg(CNTR, "\tDisabled"); } else { if (entry->flags & CNTR_VL) { hfi1_cdbg(CNTR, "\tPer VL"); for (j = 0; j < C_VL_COUNT; j++) { val = entry->rw_cntr(entry, dd, j, CNTR_MODE_R, 0); hfi1_cdbg( CNTR, "\t\tRead 0x%llx for %d", val, j); dd->cntrs[entry->offset + j] = val; } } else if (entry->flags & CNTR_SDMA) { hfi1_cdbg(CNTR, "\t Per SDMA Engine"); for (j = 0; j < chip_sdma_engines(dd); j++) { val = entry->rw_cntr(entry, dd, j, CNTR_MODE_R, 0); hfi1_cdbg(CNTR, "\t\tRead 0x%llx for %d", val, j); dd->cntrs[entry->offset + j] = val; } } else { val = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); dd->cntrs[entry->offset] = val; hfi1_cdbg(CNTR, "\tRead 0x%llx", val); } } } } return ret; } /* * Used by sysfs to create files for hfi stats to read */ u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp) { int ret; u64 val = 0; if (namep) { ret = ppd->dd->portcntrnameslen; *namep = ppd->dd->portcntrnames; } else { const struct cntr_entry *entry; int i, j; ret = ppd->dd->nportcntrs * sizeof(u64); *cntrp = ppd->cntrs; for (i = 0; i < PORT_CNTR_LAST; i++) { entry = &port_cntrs[i]; hfi1_cdbg(CNTR, "reading %s", entry->name); if (entry->flags & CNTR_DISABLED) { /* Nothing */ hfi1_cdbg(CNTR, "\tDisabled"); continue; } if (entry->flags & CNTR_VL) { hfi1_cdbg(CNTR, "\tPer VL"); for (j = 0; j < C_VL_COUNT; j++) { val = entry->rw_cntr(entry, ppd, j, CNTR_MODE_R, 0); hfi1_cdbg( CNTR, "\t\tRead 0x%llx for %d", val, j); ppd->cntrs[entry->offset + j] = val; } } else { val = entry->rw_cntr(entry, ppd, CNTR_INVALID_VL, CNTR_MODE_R, 0); ppd->cntrs[entry->offset] = val; hfi1_cdbg(CNTR, "\tRead 0x%llx", val); } } } return ret; } static void free_cntrs(struct hfi1_devdata *dd) { struct hfi1_pportdata *ppd; int i; if (dd->synth_stats_timer.function) del_timer_sync(&dd->synth_stats_timer); cancel_work_sync(&dd->update_cntr_work); ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { kfree(ppd->cntrs); kfree(ppd->scntrs); free_percpu(ppd->ibport_data.rvp.rc_acks); free_percpu(ppd->ibport_data.rvp.rc_qacks); free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); ppd->cntrs = NULL; ppd->scntrs = NULL; ppd->ibport_data.rvp.rc_acks = NULL; ppd->ibport_data.rvp.rc_qacks = NULL; ppd->ibport_data.rvp.rc_delayed_comp = NULL; } kfree(dd->portcntrnames); dd->portcntrnames = NULL; kfree(dd->cntrs); dd->cntrs = NULL; kfree(dd->scntrs); dd->scntrs = NULL; kfree(dd->cntrnames); dd->cntrnames = NULL; if (dd->update_cntr_wq) { destroy_workqueue(dd->update_cntr_wq); dd->update_cntr_wq = NULL; } } static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry, u64 *psval, void *context, int vl) { u64 val; u64 sval = *psval; if (entry->flags & CNTR_DISABLED) { dd_dev_err(dd, "Counter %s not enabled", entry->name); return 0; } hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0); /* If its a synthetic counter there is more work we need to do */ if (entry->flags & CNTR_SYNTH) { if (sval == CNTR_MAX) { /* No need to read already saturated */ return CNTR_MAX; } if (entry->flags & CNTR_32BIT) { /* 32bit counters can wrap multiple times */ u64 upper = sval >> 32; u64 lower = (sval << 32) >> 32; if (lower > val) { /* hw wrapped */ if (upper == CNTR_32BIT_MAX) val = CNTR_MAX; else upper++; } if (val != CNTR_MAX) val = (upper << 32) 
| val; } else { /* If we rolled we are saturated */ if ((val < sval) || (val > CNTR_MAX)) val = CNTR_MAX; } } *psval = val; hfi1_cdbg(CNTR, "\tNew val=0x%llx", val); return val; } static u64 write_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry, u64 *psval, void *context, int vl, u64 data) { u64 val; if (entry->flags & CNTR_DISABLED) { dd_dev_err(dd, "Counter %s not enabled", entry->name); return 0; } hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); if (entry->flags & CNTR_SYNTH) { *psval = data; if (entry->flags & CNTR_32BIT) { val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, (data << 32) >> 32); val = data; /* return the full 64bit value */ } else { val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data); } } else { val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data); } *psval = val; hfi1_cdbg(CNTR, "\tNew val=0x%llx", val); return val; } u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl) { struct cntr_entry *entry; u64 *sval; entry = &dev_cntrs[index]; sval = dd->scntrs + entry->offset; if (vl != CNTR_INVALID_VL) sval += vl; return read_dev_port_cntr(dd, entry, sval, dd, vl); } u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data) { struct cntr_entry *entry; u64 *sval; entry = &dev_cntrs[index]; sval = dd->scntrs + entry->offset; if (vl != CNTR_INVALID_VL) sval += vl; return write_dev_port_cntr(dd, entry, sval, dd, vl, data); } u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl) { struct cntr_entry *entry; u64 *sval; entry = &port_cntrs[index]; sval = ppd->scntrs + entry->offset; if (vl != CNTR_INVALID_VL) sval += vl; if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && (index <= C_RCV_HDR_OVF_LAST)) { /* We do not want to bother for disabled contexts */ return 0; } return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl); } u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data) { struct cntr_entry *entry; u64 *sval; entry = &port_cntrs[index]; sval = ppd->scntrs + entry->offset; if (vl != CNTR_INVALID_VL) sval += vl; if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && (index <= C_RCV_HDR_OVF_LAST)) { /* We do not want to bother for disabled contexts */ return 0; } return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data); } static void do_update_synth_timer(struct work_struct *work) { u64 cur_tx; u64 cur_rx; u64 total_flits; u8 update = 0; int i, j, vl; struct hfi1_pportdata *ppd; struct cntr_entry *entry; struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata, update_cntr_work); /* * Rather than keep beating on the CSRs pick a minimal set that we can * check to watch for potential roll over. We can do this by looking at * the number of flits sent/recv. If the total flits exceeds 32bits then * we have to iterate all the counters and update. */ entry = &dev_cntrs[C_DC_RCV_FLITS]; cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); entry = &dev_cntrs[C_DC_XMIT_FLITS]; cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); hfi1_cdbg( CNTR, "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx", dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { /* * May not be strictly necessary to update but it won't hurt and * simplifies the logic here. 
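 * Illustration with made-up numbers: if last_tx was saved as 0x1000
 * and the current read comes back as 0x80, cur_tx < last_tx, so the
 * tripwire fires and all synthetic counters are refreshed rather than
 * risking a missed 32-bit wrap.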
*/ update = 1; hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating", dd->unit); } else { total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); hfi1_cdbg(CNTR, "[%d] total flits 0x%llx limit 0x%llx", dd->unit, total_flits, (u64)CNTR_32BIT_MAX); if (total_flits >= CNTR_32BIT_MAX) { hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating", dd->unit); update = 1; } } if (update) { hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit); for (i = 0; i < DEV_CNTR_LAST; i++) { entry = &dev_cntrs[i]; if (entry->flags & CNTR_VL) { for (vl = 0; vl < C_VL_COUNT; vl++) read_dev_cntr(dd, i, vl); } else { read_dev_cntr(dd, i, CNTR_INVALID_VL); } } ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { for (j = 0; j < PORT_CNTR_LAST; j++) { entry = &port_cntrs[j]; if (entry->flags & CNTR_VL) { for (vl = 0; vl < C_VL_COUNT; vl++) read_port_cntr(ppd, j, vl); } else { read_port_cntr(ppd, j, CNTR_INVALID_VL); } } } /* * We want the value in the register. The goal is to keep track * of the number of "ticks" not the counter value. In other * words if the register rolls we want to notice it and go ahead * and force an update. */ entry = &dev_cntrs[C_DC_XMIT_FLITS]; dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); entry = &dev_cntrs[C_DC_RCV_FLITS]; dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx", dd->unit, dd->last_tx, dd->last_rx); } else { hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); } } static void update_synth_timer(struct timer_list *t) { struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer); queue_work(dd->update_cntr_wq, &dd->update_cntr_work); mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); } #define C_MAX_NAME 16 /* 15 chars + one for /0 */ static int init_cntrs(struct hfi1_devdata *dd) { int i, rcv_ctxts, j; size_t sz; char *p; char name[C_MAX_NAME]; struct hfi1_pportdata *ppd; const char *bit_type_32 = ",32"; const int bit_type_32_sz = strlen(bit_type_32); u32 sdma_engines = chip_sdma_engines(dd); /* set up the stats timer; the add_timer is done at the end */ timer_setup(&dd->synth_stats_timer, update_synth_timer, 0); /***********************/ /* per device counters */ /***********************/ /* size names and determine how many we have*/ dd->ndevcntrs = 0; sz = 0; for (i = 0; i < DEV_CNTR_LAST; i++) { if (dev_cntrs[i].flags & CNTR_DISABLED) { hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name); continue; } if (dev_cntrs[i].flags & CNTR_VL) { dev_cntrs[i].offset = dd->ndevcntrs; for (j = 0; j < C_VL_COUNT; j++) { snprintf(name, C_MAX_NAME, "%s%d", dev_cntrs[i].name, vl_from_idx(j)); sz += strlen(name); /* Add ",32" for 32-bit counters */ if (dev_cntrs[i].flags & CNTR_32BIT) sz += bit_type_32_sz; sz++; dd->ndevcntrs++; } } else if (dev_cntrs[i].flags & CNTR_SDMA) { dev_cntrs[i].offset = dd->ndevcntrs; for (j = 0; j < sdma_engines; j++) { snprintf(name, C_MAX_NAME, "%s%d", dev_cntrs[i].name, j); sz += strlen(name); /* Add ",32" for 32-bit counters */ if (dev_cntrs[i].flags & CNTR_32BIT) sz += bit_type_32_sz; sz++; dd->ndevcntrs++; } } else { /* +1 for newline. 
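 * Each name contributes strlen(name), an optional ",32" suffix for
 * 32-bit counters, and this newline byte; the name-fill loop further
 * down must stay in lockstep with this sizing.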
*/ sz += strlen(dev_cntrs[i].name) + 1; /* Add ",32" for 32-bit counters */ if (dev_cntrs[i].flags & CNTR_32BIT) sz += bit_type_32_sz; dev_cntrs[i].offset = dd->ndevcntrs; dd->ndevcntrs++; } } /* allocate space for the counter values */ dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), GFP_KERNEL); if (!dd->cntrs) goto bail; dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); if (!dd->scntrs) goto bail; /* allocate space for the counter names */ dd->cntrnameslen = sz; dd->cntrnames = kmalloc(sz, GFP_KERNEL); if (!dd->cntrnames) goto bail; /* fill in the names */ for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { if (dev_cntrs[i].flags & CNTR_DISABLED) { /* Nothing */ } else if (dev_cntrs[i].flags & CNTR_VL) { for (j = 0; j < C_VL_COUNT; j++) { snprintf(name, C_MAX_NAME, "%s%d", dev_cntrs[i].name, vl_from_idx(j)); memcpy(p, name, strlen(name)); p += strlen(name); /* Counter is 32 bits */ if (dev_cntrs[i].flags & CNTR_32BIT) { memcpy(p, bit_type_32, bit_type_32_sz); p += bit_type_32_sz; } *p++ = '\n'; } } else if (dev_cntrs[i].flags & CNTR_SDMA) { for (j = 0; j < sdma_engines; j++) { snprintf(name, C_MAX_NAME, "%s%d", dev_cntrs[i].name, j); memcpy(p, name, strlen(name)); p += strlen(name); /* Counter is 32 bits */ if (dev_cntrs[i].flags & CNTR_32BIT) { memcpy(p, bit_type_32, bit_type_32_sz); p += bit_type_32_sz; } *p++ = '\n'; } } else { memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name)); p += strlen(dev_cntrs[i].name); /* Counter is 32 bits */ if (dev_cntrs[i].flags & CNTR_32BIT) { memcpy(p, bit_type_32, bit_type_32_sz); p += bit_type_32_sz; } *p++ = '\n'; } } /*********************/ /* per port counters */ /*********************/ /* * Go through the counters for the overflows and disable the ones we * don't need. This varies based on platform so we need to do it * dynamically here. 
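 * Concretely: the per-context receive header overflow counters from
 * C_RCV_HDR_OVF_FIRST + num_rcv_contexts through C_RCV_HDR_OVF_LAST
 * are flagged CNTR_DISABLED, since those receive contexts are not in
 * use.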
*/ rcv_ctxts = dd->num_rcv_contexts; for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts; i <= C_RCV_HDR_OVF_LAST; i++) { port_cntrs[i].flags |= CNTR_DISABLED; } /* size port counter names and determine how many we have*/ sz = 0; dd->nportcntrs = 0; for (i = 0; i < PORT_CNTR_LAST; i++) { if (port_cntrs[i].flags & CNTR_DISABLED) { hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name); continue; } if (port_cntrs[i].flags & CNTR_VL) { port_cntrs[i].offset = dd->nportcntrs; for (j = 0; j < C_VL_COUNT; j++) { snprintf(name, C_MAX_NAME, "%s%d", port_cntrs[i].name, vl_from_idx(j)); sz += strlen(name); /* Add ",32" for 32-bit counters */ if (port_cntrs[i].flags & CNTR_32BIT) sz += bit_type_32_sz; sz++; dd->nportcntrs++; } } else { /* +1 for newline */ sz += strlen(port_cntrs[i].name) + 1; /* Add ",32" for 32-bit counters */ if (port_cntrs[i].flags & CNTR_32BIT) sz += bit_type_32_sz; port_cntrs[i].offset = dd->nportcntrs; dd->nportcntrs++; } } /* allocate space for the counter names */ dd->portcntrnameslen = sz; dd->portcntrnames = kmalloc(sz, GFP_KERNEL); if (!dd->portcntrnames) goto bail; /* fill in port cntr names */ for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { if (port_cntrs[i].flags & CNTR_DISABLED) continue; if (port_cntrs[i].flags & CNTR_VL) { for (j = 0; j < C_VL_COUNT; j++) { snprintf(name, C_MAX_NAME, "%s%d", port_cntrs[i].name, vl_from_idx(j)); memcpy(p, name, strlen(name)); p += strlen(name); /* Counter is 32 bits */ if (port_cntrs[i].flags & CNTR_32BIT) { memcpy(p, bit_type_32, bit_type_32_sz); p += bit_type_32_sz; } *p++ = '\n'; } } else { memcpy(p, port_cntrs[i].name, strlen(port_cntrs[i].name)); p += strlen(port_cntrs[i].name); /* Counter is 32 bits */ if (port_cntrs[i].flags & CNTR_32BIT) { memcpy(p, bit_type_32, bit_type_32_sz); p += bit_type_32_sz; } *p++ = '\n'; } } /* allocate per port storage for counter values */ ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); if (!ppd->cntrs) goto bail; ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); if (!ppd->scntrs) goto bail; } /* CPU counters need to be allocated and zeroed */ if (init_cpu_counters(dd)) goto bail; dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d", WQ_MEM_RECLAIM, dd->unit); if (!dd->update_cntr_wq) goto bail; INIT_WORK(&dd->update_cntr_work, do_update_synth_timer); mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); return 0; bail: free_cntrs(dd); return -ENOMEM; } static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate) { switch (chip_lstate) { case LSTATE_DOWN: return IB_PORT_DOWN; case LSTATE_INIT: return IB_PORT_INIT; case LSTATE_ARMED: return IB_PORT_ARMED; case LSTATE_ACTIVE: return IB_PORT_ACTIVE; default: dd_dev_err(dd, "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n", chip_lstate); return IB_PORT_DOWN; } } u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate) { /* look at the HFI meta-states only */ switch (chip_pstate & 0xf0) { case PLS_DISABLED: return IB_PORTPHYSSTATE_DISABLED; case PLS_OFFLINE: return OPA_PORTPHYSSTATE_OFFLINE; case PLS_POLLING: return IB_PORTPHYSSTATE_POLLING; case PLS_CONFIGPHY: return IB_PORTPHYSSTATE_TRAINING; case PLS_LINKUP: return IB_PORTPHYSSTATE_LINKUP; case PLS_PHYTEST: return IB_PORTPHYSSTATE_PHY_TEST; default: dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n", chip_pstate); return IB_PORTPHYSSTATE_DISABLED; } } /* return the OPA port logical state name */ const char *opa_lstate_name(u32 
lstate) { static const char * const port_logical_names[] = { "PORT_NOP", "PORT_DOWN", "PORT_INIT", "PORT_ARMED", "PORT_ACTIVE", "PORT_ACTIVE_DEFER", }; if (lstate < ARRAY_SIZE(port_logical_names)) return port_logical_names[lstate]; return "unknown"; } /* return the OPA port physical state name */ const char *opa_pstate_name(u32 pstate) { static const char * const port_physical_names[] = { "PHYS_NOP", "reserved1", "PHYS_POLL", "PHYS_DISABLED", "PHYS_TRAINING", "PHYS_LINKUP", "PHYS_LINK_ERR_RECOVER", "PHYS_PHY_TEST", "reserved8", "PHYS_OFFLINE", "PHYS_GANGED", "PHYS_TEST", }; if (pstate < ARRAY_SIZE(port_physical_names)) return port_physical_names[pstate]; return "unknown"; } /** * update_statusp - Update userspace status flag * @ppd: Port data structure * @state: port state information * * Actual port status is determined by the host_link_state value * in the ppd. * * host_link_state MUST be updated before updating the user space * statusp. */ static void update_statusp(struct hfi1_pportdata *ppd, u32 state) { /* * Set port status flags in the page mapped into userspace * memory. Do it here to ensure a reliable state - this is * the only function called by all state handling code. * Always set the flags due to the fact that the cache value * might have been changed explicitly outside of this * function. */ if (ppd->statusp) { switch (state) { case IB_PORT_DOWN: case IB_PORT_INIT: *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | HFI1_STATUS_IB_READY); break; case IB_PORT_ARMED: *ppd->statusp |= HFI1_STATUS_IB_CONF; break; case IB_PORT_ACTIVE: *ppd->statusp |= HFI1_STATUS_IB_READY; break; } } dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n", opa_lstate_name(state), state); } /** * wait_logical_linkstate - wait for an IB link state change to occur * @ppd: port device * @state: the state to wait for * @msecs: the number of milliseconds to wait * * Wait up to msecs milliseconds for IB link state change to occur. * For now, take the easy polling route. * Returns 0 if state reached, otherwise -ETIMEDOUT. */ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, int msecs) { unsigned long timeout; u32 new_state; timeout = jiffies + msecs_to_jiffies(msecs); while (1) { new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd)); if (new_state == state) break; if (time_after(jiffies, timeout)) { dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state); return -ETIMEDOUT; } msleep(20); } return 0; } static void log_state_transition(struct hfi1_pportdata *ppd, u32 state) { u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state); dd_dev_info(ppd->dd, "physical state changed to %s (0x%x), phy 0x%x\n", opa_pstate_name(ib_pstate), ib_pstate, state); } /* * Read the physical hardware link state and check if it matches host * drivers anticipated state. */ static void log_physical_state(struct hfi1_pportdata *ppd, u32 state) { u32 read_state = read_physical_state(ppd->dd); if (read_state == state) { log_state_transition(ppd, state); } else { dd_dev_err(ppd->dd, "anticipated phy link state 0x%x, read 0x%x\n", state, read_state); } } /* * wait_physical_linkstate - wait for an physical link state change to occur * @ppd: port device * @state: the state to wait for * @msecs: the number of milliseconds to wait * * Wait up to msecs milliseconds for physical link state change to occur. * Returns 0 if state reached, otherwise -ETIMEDOUT. 
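 * The wait polls read_physical_state() roughly every 2 ms
 * (usleep_range(1950, 2050)) until the state matches or the timeout
 * expires.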
*/ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, int msecs) { u32 read_state; unsigned long timeout; timeout = jiffies + msecs_to_jiffies(msecs); while (1) { read_state = read_physical_state(ppd->dd); if (read_state == state) break; if (time_after(jiffies, timeout)) { dd_dev_err(ppd->dd, "timeout waiting for phy link state 0x%x\n", state); return -ETIMEDOUT; } usleep_range(1950, 2050); /* sleep 2ms-ish */ } log_state_transition(ppd, state); return 0; } /* * wait_phys_link_offline_quiet_substates - wait for any offline substate * @ppd: port device * @msecs: the number of milliseconds to wait * * Wait up to msecs milliseconds for any offline physical link * state change to occur. * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT. */ static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, int msecs) { u32 read_state; unsigned long timeout; timeout = jiffies + msecs_to_jiffies(msecs); while (1) { read_state = read_physical_state(ppd->dd); if ((read_state & 0xF0) == PLS_OFFLINE) break; if (time_after(jiffies, timeout)) { dd_dev_err(ppd->dd, "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n", read_state, msecs); return -ETIMEDOUT; } usleep_range(1950, 2050); /* sleep 2ms-ish */ } log_state_transition(ppd, read_state); return read_state; } /* * wait_phys_link_out_of_offline - wait for any out of offline state * @ppd: port device * @msecs: the number of milliseconds to wait * * Wait up to msecs milliseconds for any out of offline physical link * state change to occur. * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT. */ static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd, int msecs) { u32 read_state; unsigned long timeout; timeout = jiffies + msecs_to_jiffies(msecs); while (1) { read_state = read_physical_state(ppd->dd); if ((read_state & 0xF0) != PLS_OFFLINE) break; if (time_after(jiffies, timeout)) { dd_dev_err(ppd->dd, "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n", read_state, msecs); return -ETIMEDOUT; } usleep_range(1950, 2050); /* sleep 2ms-ish */ } log_state_transition(ppd, read_state); return read_state; } #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) #define SET_STATIC_RATE_CONTROL_SMASK(r) \ (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) void hfi1_init_ctxt(struct send_context *sc) { if (sc) { struct hfi1_devdata *dd = sc->dd; u64 reg; u8 set = (sc->type == SC_USER ? HFI1_CAP_IS_USET(STATIC_RATE_CTRL) : HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)); reg = read_kctxt_csr(dd, sc->hw_context, SEND_CTXT_CHECK_ENABLE); if (set) CLEAR_STATIC_RATE_CONTROL_SMASK(reg); else SET_STATIC_RATE_CONTROL_SMASK(reg); write_kctxt_csr(dd, sc->hw_context, SEND_CTXT_CHECK_ENABLE, reg); } } int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp) { int ret = 0; u64 reg; if (dd->icode != ICODE_RTL_SILICON) { if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) dd_dev_info(dd, "%s: tempsense not supported by HW\n", __func__); return -EINVAL; } reg = read_csr(dd, ASIC_STS_THERM); temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) & ASIC_STS_THERM_CURR_TEMP_MASK); temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) & ASIC_STS_THERM_LO_TEMP_MASK); temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) & ASIC_STS_THERM_HI_TEMP_MASK); temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) & ASIC_STS_THERM_CRIT_TEMP_MASK); /* triggers is a 3-bit value - 1 bit per trigger. 
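 * (Presumably one bit each for the low, high and critical temperature
 * thresholds decoded above; only the raw 3-bit field is reported.)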
*/ temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7); return ret; } /* ========================================================================= */ /** * read_mod_write() - Calculate the IRQ register index and set/clear the bits * @dd: valid devdata * @src: IRQ source to determine register index from * @bits: the bits to set or clear * @set: true == set the bits, false == clear the bits * */ static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits, bool set) { u64 reg; u16 idx = src / BITS_PER_REGISTER; spin_lock(&dd->irq_src_lock); reg = read_csr(dd, CCE_INT_MASK + (8 * idx)); if (set) reg |= bits; else reg &= ~bits; write_csr(dd, CCE_INT_MASK + (8 * idx), reg); spin_unlock(&dd->irq_src_lock); } /** * set_intr_bits() - Enable/disable a range (one or more) IRQ sources * @dd: valid devdata * @first: first IRQ source to set/clear * @last: last IRQ source (inclusive) to set/clear * @set: true == set the bits, false == clear the bits * * If first == last, set the exact source. */ int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set) { u64 bits = 0; u64 bit; u16 src; if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES) return -EINVAL; if (last < first) return -ERANGE; for (src = first; src <= last; src++) { bit = src % BITS_PER_REGISTER; /* wrapped to next register? */ if (!bit && bits) { read_mod_write(dd, src - 1, bits, set); bits = 0; } bits |= BIT_ULL(bit); } read_mod_write(dd, last, bits, set); return 0; } /* * Clear all interrupt sources on the chip. */ void clear_all_interrupts(struct hfi1_devdata *dd) { int i; for (i = 0; i < CCE_NUM_INT_CSRS; i++) write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0); write_csr(dd, CCE_ERR_CLEAR, ~(u64)0); write_csr(dd, MISC_ERR_CLEAR, ~(u64)0); write_csr(dd, RCV_ERR_CLEAR, ~(u64)0); write_csr(dd, SEND_ERR_CLEAR, ~(u64)0); write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0); write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0); write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0); for (i = 0; i < chip_send_contexts(dd); i++) write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0); for (i = 0; i < chip_sdma_engines(dd); i++) write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0); write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0); write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0); write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0); } /* * Remap the interrupt source from the general handler to the given MSI-X * interrupt. */ void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) { u64 reg; int m, n; /* clear from the handled mask of the general interrupt */ m = isrc / 64; n = isrc % 64; if (likely(m < CCE_NUM_INT_CSRS)) { dd->gi_mask[m] &= ~((u64)1 << n); } else { dd_dev_err(dd, "remap interrupt err\n"); return; } /* direct the chip source to the given MSI-X interrupt */ m = isrc / 8; n = isrc % 8; reg = read_csr(dd, CCE_INT_MAP + (8 * m)); reg &= ~((u64)0xff << (8 * n)); reg |= ((u64)msix_intr & 0xff) << (8 * n); write_csr(dd, CCE_INT_MAP + (8 * m), reg); } void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr) { /* * SDMA engine interrupt sources grouped by type, rather than * engine. Per-engine interrupts are as follows: * SDMA * SDMAProgress * SDMAIdle */ remap_intr(dd, IS_SDMA_START + engine, msix_intr); remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr); remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr); } /* * Set the general handler to accept all interrupts, remap all * chip interrupts back to MSI-X 0. 
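 * For reference, remap_intr() above places the vector in byte
 * (isrc % 8) of CCE_INT_MAP register (isrc / 8) and clears bit
 * (isrc % 64) of gi_mask word (isrc / 64); e.g. a hypothetical
 * isrc of 73 lands in byte 1 of map register 9 and bit 9 of word 1.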
*/ void reset_interrupts(struct hfi1_devdata *dd) { int i; /* all interrupts handled by the general handler */ for (i = 0; i < CCE_NUM_INT_CSRS; i++) dd->gi_mask[i] = ~(u64)0; /* all chip interrupts map to MSI-X 0 */ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) write_csr(dd, CCE_INT_MAP + (8 * i), 0); } /** * set_up_interrupts() - Initialize the IRQ resources and state * @dd: valid devdata * */ static int set_up_interrupts(struct hfi1_devdata *dd) { int ret; /* mask all interrupts */ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); /* clear all pending interrupts */ clear_all_interrupts(dd); /* reset general handler mask, chip MSI-X mappings */ reset_interrupts(dd); /* ask for MSI-X interrupts */ ret = msix_initialize(dd); if (ret) return ret; ret = msix_request_irqs(dd); if (ret) msix_clean_up_interrupts(dd); return ret; } /* * Set up context values in dd. Sets: * * num_rcv_contexts - number of contexts being used * n_krcv_queues - number of kernel contexts * first_dyn_alloc_ctxt - first dynamically allocated context * in array of contexts * freectxts - number of free user contexts * num_send_contexts - number of PIO send contexts being used * num_netdev_contexts - number of contexts reserved for netdev */ static int set_up_context_variables(struct hfi1_devdata *dd) { unsigned long num_kernel_contexts; u16 num_netdev_contexts; int ret; unsigned ngroups; int rmt_count; u32 n_usr_ctxts; u32 send_contexts = chip_send_contexts(dd); u32 rcv_contexts = chip_rcv_contexts(dd); /* * Kernel receive contexts: * - Context 0 - control context (VL15/multicast/error) * - Context 1 - first kernel context * - Context 2 - second kernel context * ... */ if (n_krcvqs) /* * n_krcvqs is the sum of module parameter kernel receive * contexts, krcvqs[]. It does not include the control * context, so add that. */ num_kernel_contexts = n_krcvqs + 1; else num_kernel_contexts = DEFAULT_KRCVQS + 1; /* * Every kernel receive context needs an ACK send context. * one send context is allocated for each VL{0-7} and VL15 */ if (num_kernel_contexts > (send_contexts - num_vls - 1)) { dd_dev_err(dd, "Reducing # kernel rcv contexts to: %d, from %lu\n", send_contexts - num_vls - 1, num_kernel_contexts); num_kernel_contexts = send_contexts - num_vls - 1; } /* * User contexts: * - default to 1 user context per real (non-HT) CPU core if * num_user_contexts is negative */ if (num_user_contexts < 0) n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask); else n_usr_ctxts = num_user_contexts; /* * Adjust the counts given a global max. */ if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) { dd_dev_err(dd, "Reducing # user receive contexts to: %u, from %u\n", (u32)(rcv_contexts - num_kernel_contexts), n_usr_ctxts); /* recalculate */ n_usr_ctxts = rcv_contexts - num_kernel_contexts; } num_netdev_contexts = hfi1_num_netdev_contexts(dd, rcv_contexts - (num_kernel_contexts + n_usr_ctxts), &node_affinity.real_cpu_mask); /* * RMT entries are allocated as follows: * 1. QOS (0 to 128 entries) * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts + * num_netdev_contexts [b]) * 3. netdev (NUM_NETDEV_MAP_ENTRIES) * * Notes: * [a] Kernel contexts (except control) are included in FECN if kernel * TID_RDMA is active. * [b] Netdev and user contexts are randomly allocated from the same * context pool, so FECN must cover all contexts in the pool. */ rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL) + (HFI1_CAP_IS_KSET(TID_RDMA) ? 
num_kernel_contexts - 1 : 0) + n_usr_ctxts + num_netdev_contexts + NUM_NETDEV_MAP_ENTRIES; if (rmt_count > NUM_MAP_ENTRIES) { int over = rmt_count - NUM_MAP_ENTRIES; /* try to squish user contexts, minimum of 1 */ if (over >= n_usr_ctxts) { dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n"); return -EINVAL; } dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n", n_usr_ctxts, n_usr_ctxts - over); n_usr_ctxts -= over; } /* the first N are kernel contexts, the rest are user/netdev contexts */ dd->num_rcv_contexts = num_kernel_contexts + n_usr_ctxts + num_netdev_contexts; dd->n_krcv_queues = num_kernel_contexts; dd->first_dyn_alloc_ctxt = num_kernel_contexts; dd->num_netdev_contexts = num_netdev_contexts; dd->num_user_contexts = n_usr_ctxts; dd->freectxts = n_usr_ctxts; dd_dev_info(dd, "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n", rcv_contexts, (int)dd->num_rcv_contexts, (int)dd->n_krcv_queues, dd->num_netdev_contexts, dd->num_user_contexts); /* * Receive array allocation: * All RcvArray entries are divided into groups of 8. This * is required by the hardware and will speed up writes to * consecutive entries by using write-combining of the entire * cacheline. * * The number of groups are evenly divided among all contexts. * any left over groups will be given to the first N user * contexts. */ dd->rcv_entries.group_size = RCV_INCREMENT; ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size; dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts; dd->rcv_entries.nctxt_extra = ngroups - (dd->num_rcv_contexts * dd->rcv_entries.ngroups); dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n", dd->rcv_entries.ngroups, dd->rcv_entries.nctxt_extra); if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size > MAX_EAGER_ENTRIES * 2) { dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / dd->rcv_entries.group_size; dd_dev_info(dd, "RcvArray group count too high, change to %u\n", dd->rcv_entries.ngroups); dd->rcv_entries.nctxt_extra = 0; } /* * PIO send contexts */ ret = init_sc_pools_and_sizes(dd); if (ret >= 0) { /* success */ dd->num_send_contexts = ret; dd_dev_info( dd, "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n", send_contexts, dd->num_send_contexts, dd->sc_sizes[SC_KERNEL].count, dd->sc_sizes[SC_ACK].count, dd->sc_sizes[SC_USER].count, dd->sc_sizes[SC_VL15].count); ret = 0; /* success */ } return ret; } /* * Set the device/port partition key table. The MAD code * will ensure that, at least, the partial management * partition key is present in the table. */ static void set_partition_keys(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 reg = 0; int i; dd_dev_info(dd, "Setting partition keys\n"); for (i = 0; i < hfi1_get_npkeys(dd); i++) { reg |= (ppd->pkeys[i] & RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) << ((i % 4) * RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT); /* Each register holds 4 PKey values. */ if ((i % 4) == 3) { write_csr(dd, RCV_PARTITION_KEY + ((i - 3) * 2), reg); reg = 0; } } /* Always enable HW pkeys check when pkeys table is set */ add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK); } /* * These CSRs and memories are uninitialized on reset and must be * written before reading to set the ECC/parity bits. * * NOTE: All user context CSRs that are not mmaped write-only * (e.g. the TID flows) must be initialized even if the driver never * reads them. 
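 * For example, the RcvTidFlowTable entries are written for every context
 * and flow below even though the driver itself never reads them; the
 * write is what establishes valid ECC/parity for any later read.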
*/ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) { int i, j; /* CceIntMap */ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) write_csr(dd, CCE_INT_MAP + (8 * i), 0); /* SendCtxtCreditReturnAddr */ for (i = 0; i < chip_send_contexts(dd); i++) write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); /* PIO Send buffers */ /* SDMA Send buffers */ /* * These are not normally read, and (presently) have no method * to be read, so are not pre-initialized */ /* RcvHdrAddr */ /* RcvHdrTailAddr */ /* RcvTidFlowTable */ for (i = 0; i < chip_rcv_contexts(dd); i++) { write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); for (j = 0; j < RXE_NUM_TID_FLOWS; j++) write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0); } /* RcvArray */ for (i = 0; i < chip_rcv_array_count(dd); i++) hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0); /* RcvQPMapTable */ for (i = 0; i < 32; i++) write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); } /* * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus. */ static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits, u64 ctrl_bits) { unsigned long timeout; u64 reg; /* is the condition present? */ reg = read_csr(dd, CCE_STATUS); if ((reg & status_bits) == 0) return; /* clear the condition */ write_csr(dd, CCE_CTRL, ctrl_bits); /* wait for the condition to clear */ timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT); while (1) { reg = read_csr(dd, CCE_STATUS); if ((reg & status_bits) == 0) return; if (time_after(jiffies, timeout)) { dd_dev_err(dd, "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n", status_bits, reg & status_bits); return; } udelay(1); } } /* set CCE CSRs to chip reset defaults */ static void reset_cce_csrs(struct hfi1_devdata *dd) { int i; /* CCE_REVISION read-only */ /* CCE_REVISION2 read-only */ /* CCE_CTRL - bits clear automatically */ /* CCE_STATUS read-only, use CceCtrl to clear */ clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK); clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK); clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK); for (i = 0; i < CCE_NUM_SCRATCH; i++) write_csr(dd, CCE_SCRATCH + (8 * i), 0); /* CCE_ERR_STATUS read-only */ write_csr(dd, CCE_ERR_MASK, 0); write_csr(dd, CCE_ERR_CLEAR, ~0ull); /* CCE_ERR_FORCE leave alone */ for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++) write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0); write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR); /* CCE_PCIE_CTRL leave alone */ for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) { write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0); write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i), CCE_MSIX_TABLE_UPPER_RESETCSR); } for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) { /* CCE_MSIX_PBA read-only */ write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull); write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull); } for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) write_csr(dd, CCE_INT_MAP, 0); for (i = 0; i < CCE_NUM_INT_CSRS; i++) { /* CCE_INT_STATUS read-only */ write_csr(dd, CCE_INT_MASK + (8 * i), 0); write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull); /* CCE_INT_FORCE leave alone */ /* CCE_INT_BLOCKED read-only */ } for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++) write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0); } /* set MISC CSRs to chip reset defaults */ static void reset_misc_csrs(struct hfi1_devdata *dd) { int i; for (i = 0; i < 32; i++) { write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0); write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0); write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0); } 
/* * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can * only be written 128-byte chunks */ /* init RSA engine to clear lingering errors */ write_csr(dd, MISC_CFG_RSA_CMD, 1); write_csr(dd, MISC_CFG_RSA_MU, 0); write_csr(dd, MISC_CFG_FW_CTRL, 0); /* MISC_STS_8051_DIGEST read-only */ /* MISC_STS_SBM_DIGEST read-only */ /* MISC_STS_PCIE_DIGEST read-only */ /* MISC_STS_FAB_DIGEST read-only */ /* MISC_ERR_STATUS read-only */ write_csr(dd, MISC_ERR_MASK, 0); write_csr(dd, MISC_ERR_CLEAR, ~0ull); /* MISC_ERR_FORCE leave alone */ } /* set TXE CSRs to chip reset defaults */ static void reset_txe_csrs(struct hfi1_devdata *dd) { int i; /* * TXE Kernel CSRs */ write_csr(dd, SEND_CTRL, 0); __cm_reset(dd, 0); /* reset CM internal state */ /* SEND_CONTEXTS read-only */ /* SEND_DMA_ENGINES read-only */ /* SEND_PIO_MEM_SIZE read-only */ /* SEND_DMA_MEM_SIZE read-only */ write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0); pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */ /* SEND_PIO_ERR_STATUS read-only */ write_csr(dd, SEND_PIO_ERR_MASK, 0); write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull); /* SEND_PIO_ERR_FORCE leave alone */ /* SEND_DMA_ERR_STATUS read-only */ write_csr(dd, SEND_DMA_ERR_MASK, 0); write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull); /* SEND_DMA_ERR_FORCE leave alone */ /* SEND_EGRESS_ERR_STATUS read-only */ write_csr(dd, SEND_EGRESS_ERR_MASK, 0); write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull); /* SEND_EGRESS_ERR_FORCE leave alone */ write_csr(dd, SEND_BTH_QP, 0); write_csr(dd, SEND_STATIC_RATE_CONTROL, 0); write_csr(dd, SEND_SC2VLT0, 0); write_csr(dd, SEND_SC2VLT1, 0); write_csr(dd, SEND_SC2VLT2, 0); write_csr(dd, SEND_SC2VLT3, 0); write_csr(dd, SEND_LEN_CHECK0, 0); write_csr(dd, SEND_LEN_CHECK1, 0); /* SEND_ERR_STATUS read-only */ write_csr(dd, SEND_ERR_MASK, 0); write_csr(dd, SEND_ERR_CLEAR, ~0ull); /* SEND_ERR_FORCE read-only */ for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++) write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0); for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++) write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0); for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++) write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0); for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++) write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0); for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++) write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0); write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR); write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR); /* SEND_CM_CREDIT_USED_STATUS read-only */ write_csr(dd, SEND_CM_TIMER_CTRL, 0); write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0); write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0); write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0); write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0); for (i = 0; i < TXE_NUM_DATA_VL; i++) write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); write_csr(dd, SEND_CM_CREDIT_VL15, 0); /* SEND_CM_CREDIT_USED_VL read-only */ /* SEND_CM_CREDIT_USED_VL15 read-only */ /* SEND_EGRESS_CTXT_STATUS read-only */ /* SEND_EGRESS_SEND_DMA_STATUS read-only */ write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull); /* SEND_EGRESS_ERR_INFO read-only */ /* SEND_EGRESS_ERR_SOURCE read-only */ /* * TXE Per-Context CSRs */ for (i = 0; i < chip_send_contexts(dd); i++) { write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0); write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0); write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0); write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull); write_kctxt_csr(dd, i, 
SEND_CTXT_CHECK_ENABLE, 0); write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0); write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0); write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0); write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0); write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0); } /* * TXE Per-SDMA CSRs */ for (i = 0; i < chip_sdma_engines(dd); i++) { write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); /* SEND_DMA_STATUS read-only */ write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0); write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0); write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0); /* SEND_DMA_HEAD read-only */ write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0); write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0); /* SEND_DMA_IDLE_CNT read-only */ write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0); write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0); /* SEND_DMA_DESC_FETCHED_CNT read-only */ /* SEND_DMA_ENG_ERR_STATUS read-only */ write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0); write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull); /* SEND_DMA_ENG_ERR_FORCE leave alone */ write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0); write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0); write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0); write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0); write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0); write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0); write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0); } } /* * Expect on entry: * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0 */ static void init_rbufs(struct hfi1_devdata *dd) { u64 reg; int count; /* * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are * clear. */ count = 0; while (1) { reg = read_csr(dd, RCV_STATUS); if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0) break; /* * Give up after 1ms - maximum wait time. * * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at * 250MB/s bandwidth. Lower rate to 66% for overhead to get: * 136 KB / (66% * 250MB/s) = 844us */ if (count++ > 500) { dd_dev_err(dd, "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n", __func__, reg); break; } udelay(2); /* do not busy-wait the CSR */ } /* start the init - expect RcvCtrl to be 0 */ write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK); /* * Read to force the write of RcvCtrl.RxRbufInit. There is a brief * period after the write before RcvStatus.RxRbufInitDone is valid. * The delay in the first run through the loop below is sufficient and * required before the first read of RcvStatus.RxRbufInitDone.
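 * The loop below polls every 2us and gives up after 50 iterations,
 * i.e. a budget of roughly 100us against the 73us worst case noted in
 * the loop.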
*/ read_csr(dd, RCV_CTRL); /* wait for the init to finish */ count = 0; while (1) { /* delay is required first time through - see above */ udelay(2); /* do not busy-wait the CSR */ reg = read_csr(dd, RCV_STATUS); if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK)) break; /* give up after 100us - slowest possible at 33MHz is 73us */ if (count++ > 50) { dd_dev_err(dd, "%s: RcvStatus.RxRbufInit not set, continuing\n", __func__); break; } } } /* set RXE CSRs to chip reset defaults */ static void reset_rxe_csrs(struct hfi1_devdata *dd) { int i, j; /* * RXE Kernel CSRs */ write_csr(dd, RCV_CTRL, 0); init_rbufs(dd); /* RCV_STATUS read-only */ /* RCV_CONTEXTS read-only */ /* RCV_ARRAY_CNT read-only */ /* RCV_BUF_SIZE read-only */ write_csr(dd, RCV_BTH_QP, 0); write_csr(dd, RCV_MULTICAST, 0); write_csr(dd, RCV_BYPASS, 0); write_csr(dd, RCV_VL15, 0); /* this is a clear-down */ write_csr(dd, RCV_ERR_INFO, RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK); /* RCV_ERR_STATUS read-only */ write_csr(dd, RCV_ERR_MASK, 0); write_csr(dd, RCV_ERR_CLEAR, ~0ull); /* RCV_ERR_FORCE leave alone */ for (i = 0; i < 32; i++) write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); for (i = 0; i < 4; i++) write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0); for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++) write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0); for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++) write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0); for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) clear_rsm_rule(dd, i); for (i = 0; i < 32; i++) write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0); /* * RXE Kernel and User Per-Context CSRs */ for (i = 0; i < chip_rcv_contexts(dd); i++) { /* kernel */ write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0); /* RCV_CTXT_STATUS read-only */ write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0); write_kctxt_csr(dd, i, RCV_TID_CTRL, 0); write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0); write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); write_kctxt_csr(dd, i, RCV_HDR_CNT, 0); write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0); write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0); write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0); write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0); /* user */ /* RCV_HDR_TAIL read-only */ write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0); /* RCV_EGR_INDEX_TAIL read-only */ write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0); /* RCV_EGR_OFFSET_TAIL read-only */ for (j = 0; j < RXE_NUM_TID_FLOWS; j++) { write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0); } } } /* * Set sc2vl tables. 
* * They power on to zeros, so to avoid send context errors * they need to be set: * * SC 0-7 -> VL 0-7 (respectively) * SC 15 -> VL 15 * otherwise * -> VL 0 */ static void init_sc2vl_tables(struct hfi1_devdata *dd) { int i; /* init per architecture spec, constrained by hardware capability */ /* HFI maps sent packets */ write_csr(dd, SEND_SC2VLT0, SC2VL_VAL( 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7)); write_csr(dd, SEND_SC2VLT1, SC2VL_VAL( 1, 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15)); write_csr(dd, SEND_SC2VLT2, SC2VL_VAL( 2, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0)); write_csr(dd, SEND_SC2VLT3, SC2VL_VAL( 3, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0)); /* DC maps received packets */ write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL( 15_0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15)); write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL( 31_16, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0)); /* initialize the cached sc2vl values consistently with h/w */ for (i = 0; i < 32; i++) { if (i < 8 || i == 15) *((u8 *)(dd->sc2vl) + i) = (u8)i; else *((u8 *)(dd->sc2vl) + i) = 0; } } /* * Read chip sizes and then reset parts to sane, disabled, values. We cannot * depend on the chip going through a power-on reset - a driver may be loaded * and unloaded many times. * * Do not write any CSR values to the chip in this routine - there may be * a reset following the (possible) FLR in this routine. * */ static int init_chip(struct hfi1_devdata *dd) { int i; int ret = 0; /* * Put the HFI CSRs in a known state. * Combine this with a DC reset. * * Stop the device from doing anything while we do a * reset. We know there are no other active users of * the device since we are now in charge. Turn off * off all outbound and inbound traffic and make sure * the device does not generate any interrupts. */ /* disable send contexts and SDMA engines */ write_csr(dd, SEND_CTRL, 0); for (i = 0; i < chip_send_contexts(dd); i++) write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); for (i = 0; i < chip_sdma_engines(dd); i++) write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); /* disable port (turn off RXE inbound traffic) and contexts */ write_csr(dd, RCV_CTRL, 0); for (i = 0; i < chip_rcv_contexts(dd); i++) write_csr(dd, RCV_CTXT_CTRL, 0); /* mask all interrupt sources */ for (i = 0; i < CCE_NUM_INT_CSRS; i++) write_csr(dd, CCE_INT_MASK + (8 * i), 0ull); /* * DC Reset: do a full DC reset before the register clear. * A recommended length of time to hold is one CSR read, * so reread the CceDcCtrl. Then, hold the DC in reset * across the clear. */ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); (void)read_csr(dd, CCE_DC_CTRL); if (use_flr) { /* * A FLR will reset the SPC core and part of the PCIe. * The parts that need to be restored have already been * saved. 
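 * restore_pci_variables() below rewrites the saved config-space command
 * register and BARs once the FLR completes; A0 (is_ax()) parts repeat
 * the FLR and the restore a second time.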
*/ dd_dev_info(dd, "Resetting CSRs with FLR\n"); /* do the FLR, the DC reset will remain */ pcie_flr(dd->pcidev); /* restore command and BARs */ ret = restore_pci_variables(dd); if (ret) { dd_dev_err(dd, "%s: Could not restore PCI variables\n", __func__); return ret; } if (is_ax(dd)) { dd_dev_info(dd, "Resetting CSRs with FLR\n"); pcie_flr(dd->pcidev); ret = restore_pci_variables(dd); if (ret) { dd_dev_err(dd, "%s: Could not restore PCI variables\n", __func__); return ret; } } } else { dd_dev_info(dd, "Resetting CSRs with writes\n"); reset_cce_csrs(dd); reset_txe_csrs(dd); reset_rxe_csrs(dd); reset_misc_csrs(dd); } /* clear the DC reset */ write_csr(dd, CCE_DC_CTRL, 0); /* Set the LED off */ setextled(dd, 0); /* * Clear the QSFP reset. * An FLR enforces a 0 on all out pins. The driver does not touch * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and * anything plugged constantly in reset, if it pays attention * to RESET_N. * Prime examples of this are optical cables. Set all pins high. * I2CCLK and I2CDAT will change per direction, and INT_N and * MODPRS_N are input only and their value is ignored. */ write_csr(dd, ASIC_QSFP1_OUT, 0x1f); write_csr(dd, ASIC_QSFP2_OUT, 0x1f); init_chip_resources(dd); return ret; } static void init_early_variables(struct hfi1_devdata *dd) { int i; /* assign link credit variables */ dd->vau = CM_VAU; dd->link_credits = CM_GLOBAL_CREDITS; if (is_ax(dd)) dd->link_credits--; dd->vcu = cu_to_vcu(hfi1_cu); /* enough room for 8 MAD packets plus header - 17K */ dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau); if (dd->vl15_init > dd->link_credits) dd->vl15_init = dd->link_credits; write_uninitialized_csrs_and_memories(dd); if (HFI1_CAP_IS_KSET(PKEY_CHECK)) for (i = 0; i < dd->num_pports; i++) { struct hfi1_pportdata *ppd = &dd->pport[i]; set_partition_keys(ppd); } init_sc2vl_tables(dd); } static void init_kdeth_qp(struct hfi1_devdata *dd) { write_csr(dd, SEND_BTH_QP, (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) << SEND_BTH_QP_KDETH_QP_SHIFT); write_csr(dd, RCV_BTH_QP, (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) << RCV_BTH_QP_KDETH_QP_SHIFT); } /** * hfi1_get_qp_map - get qp map * @dd: device data * @idx: index to read */ u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx) { u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8); reg >>= (idx % 8) * 8; return reg; } /** * init_qpmap_table - init qp map * @dd: device data * @first_ctxt: first context * @last_ctxt: first context * * This return sets the qpn mapping table that * is indexed by qpn[8:1]. * * The routine will round robin the 256 settings * from first_ctxt to last_ctxt. * * The first/last looks ahead to having specialized * receive contexts for mgmt and bypass. Normal * verbs traffic will assumed to be on a range * of receive contexts. */ static void init_qpmap_table(struct hfi1_devdata *dd, u32 first_ctxt, u32 last_ctxt) { u64 reg = 0; u64 regno = RCV_QP_MAP_TABLE; int i; u64 ctxt = first_ctxt; for (i = 0; i < 256; i++) { reg |= ctxt << (8 * (i % 8)); ctxt++; if (ctxt > last_ctxt) ctxt = first_ctxt; if (i % 8 == 7) { write_csr(dd, regno, reg); reg = 0; regno += 8; } } add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK); } struct rsm_map_table { u64 map[NUM_MAP_REGS]; unsigned int used; }; struct rsm_rule_data { u8 offset; u8 pkt_type; u32 field1_off; u32 field2_off; u32 index1_off; u32 index1_width; u32 index2_off; u32 index2_width; u32 mask1; u32 value1; u32 mask2; u32 value2; }; /* * Return an initialized RMT map table for users to fill in. 
OK if it * returns NULL, indicating no table. */ static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd) { struct rsm_map_table *rmt; u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */ rmt = kmalloc(sizeof(*rmt), GFP_KERNEL); if (rmt) { memset(rmt->map, rxcontext, sizeof(rmt->map)); rmt->used = 0; } return rmt; } /* * Write the final RMT map table to the chip and free the table. OK if * table is NULL. */ static void complete_rsm_map_table(struct hfi1_devdata *dd, struct rsm_map_table *rmt) { int i; if (rmt) { /* write table to chip */ for (i = 0; i < NUM_MAP_REGS; i++) write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]); /* enable RSM */ add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); } } /* Is a receive side mapping rule */ static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) { return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0; } /* * Add a receive side mapping rule. */ static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index, struct rsm_rule_data *rrd) { write_csr(dd, RCV_RSM_CFG + (8 * rule_index), (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT | 1ull << rule_index | /* enable bit */ (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT); write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT | (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT | (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT | (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT | (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT | (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT); write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT | (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT | (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT | (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT); } /* * Clear a receive side mapping rule. */ static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) { write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0); write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0); write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0); } /* return the number of RSM map table entries that will be used for QOS */ static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp, unsigned int *np) { int i; unsigned int m, n; uint max_by_vl = 0; /* is QOS active at all? */ if (n_krcv_queues < MIN_KERNEL_KCTXTS || num_vls == 1 || krcvqsset <= 1) goto no_qos; /* determine bits for qpn */ for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++) if (krcvqs[i] > max_by_vl) max_by_vl = krcvqs[i]; if (max_by_vl > 32) goto no_qos; m = ilog2(__roundup_pow_of_two(max_by_vl)); /* determine bits for vl */ n = ilog2(__roundup_pow_of_two(num_vls)); /* reject if too much is used */ if ((m + n) > 7) goto no_qos; if (mp) *mp = m; if (np) *np = n; return 1 << (m + n); no_qos: if (mp) *mp = 0; if (np) *np = 0; return 0; } /** * init_qos - init RX qos * @dd: device data * @rmt: RSM map table * * This routine initializes Rule 0 and the RSM map table to implement * quality of service (qos). * * If all of the limit tests succeed, qos is applied based on the array * interpretation of krcvqs where entry 0 is VL0. * * The number of vl bits (n) and the number of qpn bits (m) are computed to * feed both the RSM map table and the single rule. 
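 * For example, with num_vls = 8 and a largest krcvqs[] entry of 4
 * (and the limit checks above passing), qos_rmt_entries() yields m = 2
 * and n = 3, i.e. 1 << (m + n) = 32 RSM map table entries for QOS.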
*/ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) { struct rsm_rule_data rrd; unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m; unsigned int rmt_entries; u64 reg; if (!rmt) goto bail; rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n); if (rmt_entries == 0) goto bail; qpns_per_vl = 1 << m; /* enough room in the map table? */ rmt_entries = 1 << (m + n); if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES) goto bail; /* add qos entries to the RSM map table */ for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) { unsigned tctxt; for (qpn = 0, tctxt = ctxt; krcvqs[i] && qpn < qpns_per_vl; qpn++) { unsigned idx, regoff, regidx; /* generate the index the hardware will produce */ idx = rmt->used + ((qpn << n) ^ i); regoff = (idx % 8) * 8; regidx = idx / 8; /* replace default with context number */ reg = rmt->map[regidx]; reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff); reg |= (u64)(tctxt++) << regoff; rmt->map[regidx] = reg; if (tctxt == ctxt + krcvqs[i]) tctxt = ctxt; } ctxt += krcvqs[i]; } rrd.offset = rmt->used; rrd.pkt_type = 2; rrd.field1_off = LRH_BTH_MATCH_OFFSET; rrd.field2_off = LRH_SC_MATCH_OFFSET; rrd.index1_off = LRH_SC_SELECT_OFFSET; rrd.index1_width = n; rrd.index2_off = QPN_SELECT_OFFSET; rrd.index2_width = m + n; rrd.mask1 = LRH_BTH_MASK; rrd.value1 = LRH_BTH_VALUE; rrd.mask2 = LRH_SC_MASK; rrd.value2 = LRH_SC_VALUE; /* add rule 0 */ add_rsm_rule(dd, RSM_INS_VERBS, &rrd); /* mark RSM map entries as used */ rmt->used += rmt_entries; /* map everything else to the mcast/err/vl15 context */ init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT); dd->qos_shift = n + 1; return; bail: dd->qos_shift = 1; init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1); } static void init_fecn_handling(struct hfi1_devdata *dd, struct rsm_map_table *rmt) { struct rsm_rule_data rrd; u64 reg; int i, idx, regoff, regidx, start; u8 offset; u32 total_cnt; if (HFI1_CAP_IS_KSET(TID_RDMA)) /* Exclude context 0 */ start = 1; else start = dd->first_dyn_alloc_ctxt; total_cnt = dd->num_rcv_contexts - start; /* there needs to be enough room in the map table */ if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n"); return; } /* * RSM will extract the destination context as an index into the * map table. The destination contexts are a sequential block * in the range start...num_rcv_contexts-1 (inclusive). * Map entries are accessed as offset + extracted value. Adjust * the added offset so this sequence can be placed anywhere in * the table - as long as the entries themselves do not wrap. * There are only enough bits in offset for the table size, so * start with that to allow for a "negative" offset. */ offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start); for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; i++, idx++) { /* replace with identity mapping */ regoff = (idx % 8) * 8; regidx = idx / 8; reg = rmt->map[regidx]; reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff); reg |= (u64)i << regoff; rmt->map[regidx] = reg; } /* * For RSM intercept of Expected FECN packets: * o packet type 0 - expected * o match on F (bit 95), using select/match 1, and * o match on SH (bit 133), using select/match 2. * * Use index 1 to extract the 8-bit receive context from DestQP * (start at bit 64). Use that as the RSM map table index. 
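 * Worked example, assuming NUM_MAP_ENTRIES is 256 (32 map registers of
 * 8 entries each, per hfi1_netdev_update_rmt() below): with
 * rmt->used = 40 and start = 1, offset = (u8)(256 + 40 - 1) = 39, so
 * destination context 5 selects map entry 39 + 5 = 44, which is exactly
 * rmt->used + (5 - start), the identity entry written above.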
*/ rrd.offset = offset; rrd.pkt_type = 0; rrd.field1_off = 95; rrd.field2_off = 133; rrd.index1_off = 64; rrd.index1_width = 8; rrd.index2_off = 0; rrd.index2_width = 0; rrd.mask1 = 1; rrd.value1 = 1; rrd.mask2 = 1; rrd.value2 = 1; /* add rule 1 */ add_rsm_rule(dd, RSM_INS_FECN, &rrd); rmt->used += total_cnt; } static inline bool hfi1_is_rmt_full(int start, int spare) { return (start + spare) > NUM_MAP_ENTRIES; } static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd) { u8 i, j; u8 ctx_id = 0; u64 reg; u32 regoff; int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); int ctxt_count = hfi1_netdev_ctxt_count(dd); /* We already have contexts mapped in RMT */ if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) { dd_dev_info(dd, "Contexts are already mapped in RMT\n"); return true; } if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) { dd_dev_err(dd, "Not enough RMT entries used = %d\n", rmt_start); return false; } dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n", rmt_start, rmt_start + NUM_NETDEV_MAP_ENTRIES); /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */ regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8; reg = read_csr(dd, regoff); for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) { /* Update map register with netdev context */ j = (rmt_start + i) % 8; reg &= ~(0xffllu << (j * 8)); reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8); /* Wrap up netdev ctx index */ ctx_id %= ctxt_count; /* Write back map register */ if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) { dev_dbg(&(dd)->pcidev->dev, "RMT[%d] =0x%llx\n", regoff - RCV_RSM_MAP_TABLE, reg); write_csr(dd, regoff, reg); regoff += 8; if (i < (NUM_NETDEV_MAP_ENTRIES - 1)) reg = read_csr(dd, regoff); } } return true; } static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd, int rule, struct rsm_rule_data *rrd) { if (!hfi1_netdev_update_rmt(dd)) { dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule); return; } add_rsm_rule(dd, rule, rrd); add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); } void hfi1_init_aip_rsm(struct hfi1_devdata *dd) { /* * go through with the initialisation only if this rule actually doesn't * exist yet */ if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) { int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); struct rsm_rule_data rrd = { .offset = rmt_start, .pkt_type = IB_PACKET_TYPE, .field1_off = LRH_BTH_MATCH_OFFSET, .mask1 = LRH_BTH_MASK, .value1 = LRH_BTH_VALUE, .field2_off = BTH_DESTQP_MATCH_OFFSET, .mask2 = BTH_DESTQP_MASK, .value2 = BTH_DESTQP_VALUE, .index1_off = DETH_AIP_SQPN_SELECT_OFFSET + ilog2(NUM_NETDEV_MAP_ENTRIES), .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES), .index2_off = DETH_AIP_SQPN_SELECT_OFFSET, .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES) }; hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd); } } /* Initialize RSM for VNIC */ void hfi1_init_vnic_rsm(struct hfi1_devdata *dd) { int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); struct rsm_rule_data rrd = { /* Add rule for vnic */ .offset = rmt_start, .pkt_type = 4, /* Match 16B packets */ .field1_off = L2_TYPE_MATCH_OFFSET, .mask1 = L2_TYPE_MASK, .value1 = L2_16B_VALUE, /* Match ETH L4 packets */ .field2_off = L4_TYPE_MATCH_OFFSET, .mask2 = L4_16B_TYPE_MASK, .value2 = L4_16B_ETH_VALUE, /* Calc context from veswid and entropy */ .index1_off = L4_16B_HDR_VESWID_OFFSET, .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES), .index2_off = L2_16B_ENTROPY_OFFSET, .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES) }; hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd); } void hfi1_deinit_vnic_rsm(struct 
hfi1_devdata *dd) { clear_rsm_rule(dd, RSM_INS_VNIC); } void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd) { /* only actually clear the rule if it's the last user asking to do so */ if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1) clear_rsm_rule(dd, RSM_INS_AIP); } static int init_rxe(struct hfi1_devdata *dd) { struct rsm_map_table *rmt; u64 val; /* enable all receive errors */ write_csr(dd, RCV_ERR_MASK, ~0ull); rmt = alloc_rsm_map_table(dd); if (!rmt) return -ENOMEM; /* set up QOS, including the QPN map table */ init_qos(dd, rmt); init_fecn_handling(dd, rmt); complete_rsm_map_table(dd, rmt); /* record number of used rsm map entries for netdev */ hfi1_netdev_set_free_rmt_idx(dd, rmt->used); kfree(rmt); /* * make sure RcvCtrl.RcvWcb <= PCIe Device Control * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and * Max_PayLoad_Size set to its minimum of 128. * * Presently, RcvCtrl.RcvWcb is not modified from its default of 0 * (64 bytes). Max_Payload_Size is possibly modified upward in * tune_pcie_caps() which is called after this routine. */ /* Have 16 bytes (4DW) of bypass header available in header queue */ val = read_csr(dd, RCV_BYPASS); val &= ~RCV_BYPASS_HDR_SIZE_SMASK; val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << RCV_BYPASS_HDR_SIZE_SHIFT); write_csr(dd, RCV_BYPASS, val); return 0; } static void init_other(struct hfi1_devdata *dd) { /* enable all CCE errors */ write_csr(dd, CCE_ERR_MASK, ~0ull); /* enable *some* Misc errors */ write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK); /* enable all DC errors, except LCB */ write_csr(dd, DCC_ERR_FLG_EN, ~0ull); write_csr(dd, DC_DC8051_ERR_EN, ~0ull); } /* * Fill out the given AU table using the given CU. A CU is defined in terms * AUs. The table is a an encoding: given the index, how many AUs does that * represent? * * NOTE: Assumes that the register layout is the same for the * local and remote tables. 
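 * The encoding written below is: index 0 -> 0 AUs, index 1 -> 1 AU, and
 * indices 2 through 7 -> 2*cu, 4*cu, 8*cu, 16*cu, 32*cu and 64*cu AUs
 * respectively.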
*/ static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu, u32 csr0to3, u32 csr4to7) { write_csr(dd, csr0to3, 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT | 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT | 2ull * cu << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT | 4ull * cu << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT); write_csr(dd, csr4to7, 8ull * cu << SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT | 16ull * cu << SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT | 32ull * cu << SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT | 64ull * cu << SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT); } static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu) { assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3, SEND_CM_LOCAL_AU_TABLE4_TO7); } void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu) { assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3, SEND_CM_REMOTE_AU_TABLE4_TO7); } static void init_txe(struct hfi1_devdata *dd) { int i; /* enable all PIO, SDMA, general, and Egress errors */ write_csr(dd, SEND_PIO_ERR_MASK, ~0ull); write_csr(dd, SEND_DMA_ERR_MASK, ~0ull); write_csr(dd, SEND_ERR_MASK, ~0ull); write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull); /* enable all per-context and per-SDMA engine errors */ for (i = 0; i < chip_send_contexts(dd); i++) write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull); for (i = 0; i < chip_sdma_engines(dd); i++) write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull); /* set the local CU to AU mapping */ assign_local_cm_au_table(dd, dd->vcu); /* * Set reasonable default for Credit Return Timer * Don't set on Simulator - causes it to choke. */ if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR) write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE); } int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, u16 jkey) { u8 hw_ctxt; u64 reg; if (!rcd || !rcd->sc) return -EINVAL; hw_ctxt = rcd->sc->hw_context; reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */ ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) << SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT); /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */ if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY)) reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK; write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg); /* * Enable send-side J_KEY integrity check, unless this is A0 h/w */ if (!is_ax(dd)) { reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); } /* Enable J_KEY check on receive context. */ reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK | ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) << RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT); write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg); return 0; } int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) { u8 hw_ctxt; u64 reg; if (!rcd || !rcd->sc) return -EINVAL; hw_ctxt = rcd->sc->hw_context; write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0); /* * Disable send-side J_KEY integrity check, unless this is A0 h/w. * This check would not have been enabled for A0 h/w, see * set_ctxt_jkey(). 
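 * (The CHECK_JOB_KEY enable bit cleared below is the same one set by
 * hfi1_set_ctxt_jkey() above for non-A0 hardware.)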
*/ if (!is_ax(dd)) { reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); } /* Turn off the J_KEY on the receive side */ write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0); return 0; } int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, u16 pkey) { u8 hw_ctxt; u64 reg; if (!rcd || !rcd->sc) return -EINVAL; hw_ctxt = rcd->sc->hw_context; reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) << SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT; write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg); reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK; reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK; write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); return 0; } int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt) { u8 hw_ctxt; u64 reg; if (!ctxt || !ctxt->sc) return -EINVAL; hw_ctxt = ctxt->sc->hw_context; reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK; write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0); return 0; } /* * Start doing the clean up the chip. Our clean up happens in multiple * stages and this is just the first. */ void hfi1_start_cleanup(struct hfi1_devdata *dd) { aspm_exit(dd); free_cntrs(dd); free_rcverr(dd); finish_chip_resources(dd); } #define HFI_BASE_GUID(dev) \ ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT)) /* * Information can be shared between the two HFIs on the same ASIC * in the same OS. This function finds the peer device and sets * up a shared structure. */ static int init_asic_data(struct hfi1_devdata *dd) { unsigned long index; struct hfi1_devdata *peer; struct hfi1_asic_data *asic_data; int ret = 0; /* pre-allocate the asic structure in case we are the first device */ asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); if (!asic_data) return -ENOMEM; xa_lock_irq(&hfi1_dev_table); /* Find our peer device */ xa_for_each(&hfi1_dev_table, index, peer) { if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) && dd->unit != peer->unit) break; } if (peer) { /* use already allocated structure */ dd->asic_data = peer->asic_data; kfree(asic_data); } else { dd->asic_data = asic_data; mutex_init(&dd->asic_data->asic_resource_mutex); } dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ xa_unlock_irq(&hfi1_dev_table); /* first one through - set up i2c devices */ if (!peer) ret = set_up_i2c(dd, dd->asic_data); return ret; } /* * Set dd->boardname. Use a generic name if a name is not returned from * EFI variable space. * * Return 0 on success, -ENOMEM if space could not be allocated. */ static int obtain_boardname(struct hfi1_devdata *dd) { /* generic board description */ const char generic[] = "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series"; unsigned long size; int ret; ret = read_hfi1_efi_var(dd, "description", &size, (void **)&dd->boardname); if (ret) { dd_dev_info(dd, "Board description not found\n"); /* use generic description */ dd->boardname = kstrdup(generic, GFP_KERNEL); if (!dd->boardname) return -ENOMEM; } return 0; } /* * Check the interrupt registers to make sure that they are mapped correctly. * It is intended to help user identify any mismapping by VMM when the driver * is running in a VM. 
This function should only be called before interrupt * is set up properly. * * Return 0 on success, -EINVAL on failure. */ static int check_int_registers(struct hfi1_devdata *dd) { u64 reg; u64 all_bits = ~(u64)0; u64 mask; /* Clear CceIntMask[0] to avoid raising any interrupts */ mask = read_csr(dd, CCE_INT_MASK); write_csr(dd, CCE_INT_MASK, 0ull); reg = read_csr(dd, CCE_INT_MASK); if (reg) goto err_exit; /* Clear all interrupt status bits */ write_csr(dd, CCE_INT_CLEAR, all_bits); reg = read_csr(dd, CCE_INT_STATUS); if (reg) goto err_exit; /* Set all interrupt status bits */ write_csr(dd, CCE_INT_FORCE, all_bits); reg = read_csr(dd, CCE_INT_STATUS); if (reg != all_bits) goto err_exit; /* Restore the interrupt mask */ write_csr(dd, CCE_INT_CLEAR, all_bits); write_csr(dd, CCE_INT_MASK, mask); return 0; err_exit: write_csr(dd, CCE_INT_MASK, mask); dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n"); return -EINVAL; } /** * hfi1_init_dd() - Initialize most of the dd structure. * @dd: the dd device * * This is global, and is called directly at init to set up the * chip-specific function pointers for later use. */ int hfi1_init_dd(struct hfi1_devdata *dd) { struct pci_dev *pdev = dd->pcidev; struct hfi1_pportdata *ppd; u64 reg; int i, ret; static const char * const inames[] = { /* implementation names */ "RTL silicon", "RTL VCS simulation", "RTL FPGA emulation", "Functional simulator" }; struct pci_dev *parent = pdev->bus->self; u32 sdma_engines = chip_sdma_engines(dd); ppd = dd->pport; for (i = 0; i < dd->num_pports; i++, ppd++) { int vl; /* init common fields */ hfi1_init_pportdata(pdev, ppd, dd, 0, 1); /* DC supports 4 link widths */ ppd->link_width_supported = OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X | OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X; ppd->link_width_downgrade_supported = ppd->link_width_supported; /* start out enabling only 4X */ ppd->link_width_enabled = OPA_LINK_WIDTH_4X; ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported; /* link width active is 0 when link is down */ /* link width downgrade active is 0 when link is down */ if (num_vls < HFI1_MIN_VLS_SUPPORTED || num_vls > HFI1_MAX_VLS_SUPPORTED) { dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n", num_vls, HFI1_MAX_VLS_SUPPORTED); num_vls = HFI1_MAX_VLS_SUPPORTED; } ppd->vls_supported = num_vls; ppd->vls_operational = ppd->vls_supported; /* Set the default MTU. */ for (vl = 0; vl < num_vls; vl++) dd->vld[vl].mtu = hfi1_max_mtu; dd->vld[15].mtu = MAX_MAD_PACKET; /* * Set the initial values to reasonable default, will be set * for real when link is up. */ ppd->overrun_threshold = 0x4; ppd->phy_error_threshold = 0xf; ppd->port_crc_mode_enabled = link_crc_mask; /* initialize supported LTP CRC mode */ ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; /* initialize enabled LTP CRC mode */ ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4; /* start in offline */ ppd->host_link_state = HLS_DN_OFFLINE; init_vl_arb_caches(ppd); } /* * Do remaining PCIe setup and save PCIe values in dd. * Any error printing is already done by the init code. * On return, we have the chip mapped. 
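 * From this point on, failures unwind through the bail_* labels at the
 * bottom of this function.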
*/ ret = hfi1_pcie_ddinit(dd, pdev); if (ret < 0) goto bail_free; /* Save PCI space registers to rewrite after device reset */ ret = save_pci_variables(dd); if (ret < 0) goto bail_cleanup; dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT) & CCE_REVISION_CHIP_REV_MAJOR_MASK; dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) & CCE_REVISION_CHIP_REV_MINOR_MASK; /* * Check interrupt registers mapping if the driver has no access to * the upstream component. In this case, it is likely that the driver * is running in a VM. */ if (!parent) { ret = check_int_registers(dd); if (ret) goto bail_cleanup; } /* * obtain the hardware ID - NOT related to unit, which is a * software enumeration */ reg = read_csr(dd, CCE_REVISION2); dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT) & CCE_REVISION2_HFI_ID_MASK; /* the variable size will remove unwanted bits */ dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT; dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT; dd_dev_info(dd, "Implementation: %s, revision 0x%x\n", dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown", (int)dd->irev); /* speeds the hardware can support */ dd->pport->link_speed_supported = OPA_LINK_SPEED_25G; /* speeds allowed to run at */ dd->pport->link_speed_enabled = dd->pport->link_speed_supported; /* give a reasonable active value, will be set on link up */ dd->pport->link_speed_active = OPA_LINK_SPEED_25G; /* fix up link widths for emulation _p */ ppd = dd->pport; if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) { ppd->link_width_supported = ppd->link_width_enabled = ppd->link_width_downgrade_supported = ppd->link_width_downgrade_enabled = OPA_LINK_WIDTH_1X; } /* insure num_vls isn't larger than number of sdma engines */ if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) { dd_dev_err(dd, "num_vls %u too large, using %u VLs\n", num_vls, sdma_engines); num_vls = sdma_engines; ppd->vls_supported = sdma_engines; ppd->vls_operational = ppd->vls_supported; } /* * Convert the ns parameter to the 64 * cclocks used in the CSR. * Limit the max if larger than the field holds. If timeout is * non-zero, then the calculated field will be at least 1. * * Must be after icode is set up - the cclock rate depends * on knowing the hardware being used. */ dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64; if (dd->rcv_intr_timeout_csr > RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK) dd->rcv_intr_timeout_csr = RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK; else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout) dd->rcv_intr_timeout_csr = 1; /* needs to be done before we look for the peer device */ read_guid(dd); /* set up shared ASIC data with peer device */ ret = init_asic_data(dd); if (ret) goto bail_cleanup; /* obtain chip sizes, reset chip CSRs */ ret = init_chip(dd); if (ret) goto bail_cleanup; /* read in the PCIe link speed information */ ret = pcie_speeds(dd); if (ret) goto bail_cleanup; /* call before get_platform_config(), after init_chip_resources() */ ret = eprom_init(dd); if (ret) goto bail_free_rcverr; /* Needs to be called before hfi1_firmware_init */ get_platform_config(dd); /* read in firmware */ ret = hfi1_firmware_init(dd); if (ret) goto bail_cleanup; /* * In general, the PCIe Gen3 transition must occur after the * chip has been idled (so it won't initiate any PCIe transactions * e.g. an interrupt) and before the driver changes any registers * (the transition will reset the registers). 
* * In particular, place this call after: * - init_chip() - the chip will not initiate any PCIe transactions * - pcie_speeds() - reads the current link speed * - hfi1_firmware_init() - the needed firmware is ready to be * downloaded */ ret = do_pcie_gen3_transition(dd); if (ret) goto bail_cleanup; /* * This should probably occur in hfi1_pcie_init(), but historically * occurs after the do_pcie_gen3_transition() code. */ tune_pcie_caps(dd); /* start setting dd values and adjusting CSRs */ init_early_variables(dd); parse_platform_config(dd); ret = obtain_boardname(dd); if (ret) goto bail_cleanup; snprintf(dd->boardversion, BOARD_VERS_MAX, "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n", HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN, (u32)dd->majrev, (u32)dd->minrev, (dd->revision >> CCE_REVISION_SW_SHIFT) & CCE_REVISION_SW_MASK); /* alloc VNIC/AIP rx data */ ret = hfi1_alloc_rx(dd); if (ret) goto bail_cleanup; ret = set_up_context_variables(dd); if (ret) goto bail_cleanup; /* set initial RXE CSRs */ ret = init_rxe(dd); if (ret) goto bail_cleanup; /* set initial TXE CSRs */ init_txe(dd); /* set initial non-RXE, non-TXE CSRs */ init_other(dd); /* set up KDETH QP prefix in both RX and TX CSRs */ init_kdeth_qp(dd); ret = hfi1_dev_affinity_init(dd); if (ret) goto bail_cleanup; /* send contexts must be set up before receive contexts */ ret = init_send_contexts(dd); if (ret) goto bail_cleanup; ret = hfi1_create_kctxts(dd); if (ret) goto bail_cleanup; /* * Initialize aspm, to be done after gen3 transition and setting up * contexts and before enabling interrupts */ aspm_init(dd); ret = init_pervl_scs(dd); if (ret) goto bail_cleanup; /* sdma init */ for (i = 0; i < dd->num_pports; ++i) { ret = sdma_init(dd, i); if (ret) goto bail_cleanup; } /* use contexts created by hfi1_create_kctxts */ ret = set_up_interrupts(dd); if (ret) goto bail_cleanup; ret = hfi1_comp_vectors_set_up(dd); if (ret) goto bail_clear_intr; /* set up LCB access - must be after set_up_interrupts() */ init_lcb_access(dd); /* * Serial number is created from the base guid: * [27:24] = base guid [38:35] * [23: 0] = base guid [23: 0] */ snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n", (dd->base_guid & 0xFFFFFF) | ((dd->base_guid >> 11) & 0xF000000)); dd->oui1 = dd->base_guid >> 56 & 0xFF; dd->oui2 = dd->base_guid >> 48 & 0xFF; dd->oui3 = dd->base_guid >> 40 & 0xFF; ret = load_firmware(dd); /* asymmetric with dispose_firmware() */ if (ret) goto bail_clear_intr; thermal_init(dd); ret = init_cntrs(dd); if (ret) goto bail_clear_intr; ret = init_rcverr(dd); if (ret) goto bail_free_cntrs; init_completion(&dd->user_comp); /* The user refcount starts with one to inidicate an active device */ refcount_set(&dd->user_refcount, 1); goto bail; bail_free_rcverr: free_rcverr(dd); bail_free_cntrs: free_cntrs(dd); bail_clear_intr: hfi1_comp_vectors_clean_up(dd); msix_clean_up_interrupts(dd); bail_cleanup: hfi1_free_rx(dd); hfi1_pcie_ddcleanup(dd); bail_free: hfi1_free_devdata(dd); bail: return ret; } static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate, u32 dw_len) { u32 delta_cycles; u32 current_egress_rate = ppd->current_egress_rate; /* rates here are in units of 10^6 bits/sec */ if (desired_egress_rate == -1) return 0; /* shouldn't happen */ if (desired_egress_rate >= current_egress_rate) return 0; /* we can't help go faster, only slower */ delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) - egress_cycles(dw_len * 4, current_egress_rate); return (u16)delta_cycles; } /** * create_pbc - build a pbc for transmission * @ppd: info 
of physical Hfi port * @flags: special case flags or-ed in built pbc * @srate_mbs: static rate * @vl: vl * @dw_len: dword length (header words + data words + pbc words) * * Create a PBC with the given flags, rate, VL, and length. * * NOTE: The PBC created will not insert any HCRC - all callers but one are * for verbs, which does not use this PSM feature. The lone other caller * is for the diagnostic interface which calls this if the user does not * supply their own PBC. */ u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl, u32 dw_len) { u64 pbc, delay = 0; if (unlikely(srate_mbs)) delay = delay_cycles(ppd, srate_mbs, dw_len); pbc = flags | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT) | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT) | (vl & PBC_VL_MASK) << PBC_VL_SHIFT | (dw_len & PBC_LENGTH_DWS_MASK) << PBC_LENGTH_DWS_SHIFT; return pbc; } #define SBUS_THERMAL 0x4f #define SBUS_THERM_MONITOR_MODE 0x1 #define THERM_FAILURE(dev, ret, reason) \ dd_dev_err((dd), \ "Thermal sensor initialization failed: %s (%d)\n", \ (reason), (ret)) /* * Initialize the thermal sensor. * * After initialization, enable polling of thermal sensor through * SBus interface. In order for this to work, the SBus Master * firmware has to be loaded due to the fact that the HW polling * logic uses SBus interrupts, which are not supported with * default firmware. Otherwise, no data will be returned through * the ASIC_STS_THERM CSR. */ static int thermal_init(struct hfi1_devdata *dd) { int ret = 0; if (dd->icode != ICODE_RTL_SILICON || check_chip_resource(dd, CR_THERM_INIT, NULL)) return ret; ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); if (ret) { THERM_FAILURE(dd, ret, "Acquire SBus"); return ret; } dd_dev_info(dd, "Initializing thermal sensor\n"); /* Disable polling of thermal readings */ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0); msleep(100); /* Thermal Sensor Initialization */ /* Step 1: Reset the Thermal SBus Receiver */ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, RESET_SBUS_RECEIVER, 0); if (ret) { THERM_FAILURE(dd, ret, "Bus Reset"); goto done; } /* Step 2: Set Reset bit in Thermal block */ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, WRITE_SBUS_RECEIVER, 0x1); if (ret) { THERM_FAILURE(dd, ret, "Therm Block Reset"); goto done; } /* Step 3: Write clock divider value (100MHz -> 2MHz) */ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1, WRITE_SBUS_RECEIVER, 0x32); if (ret) { THERM_FAILURE(dd, ret, "Write Clock Div"); goto done; } /* Step 4: Select temperature mode */ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3, WRITE_SBUS_RECEIVER, SBUS_THERM_MONITOR_MODE); if (ret) { THERM_FAILURE(dd, ret, "Write Mode Sel"); goto done; } /* Step 5: De-assert block reset and start conversion */ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, WRITE_SBUS_RECEIVER, 0x2); if (ret) { THERM_FAILURE(dd, ret, "Write Reset Deassert"); goto done; } /* Step 5.1: Wait for first conversion (21.5ms per spec) */ msleep(22); /* Enable polling of thermal readings */ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1); /* Set initialized flag */ ret = acquire_chip_resource(dd, CR_THERM_INIT, 0); if (ret) THERM_FAILURE(dd, ret, "Unable to set thermal init flag"); done: release_chip_resource(dd, CR_SBUS); return ret; } static void handle_temp_err(struct hfi1_devdata *dd) { struct hfi1_pportdata *ppd = &dd->pport[0]; /* * Thermal Critical Interrupt * Put the device into forced freeze mode, take link down to * offline, and put DC into reset. */ dd_dev_emerg(dd, "Critical temperature reached! 
Forcing device into freeze mode!\n"); dd->flags |= HFI1_FORCED_FREEZE; start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT); /* * Shut DC down as much and as quickly as possible. * * Step 1: Take the link down to OFFLINE. This will cause the * 8051 to put the Serdes in reset. However, we don't want to * go through the entire link state machine since we want to * shutdown ASAP. Furthermore, this is not a graceful shutdown * but rather an attempt to save the chip. * Code below is almost the same as quiet_serdes() but avoids * all the extra work and the sleeps. */ ppd->driver_link_ready = 0; ppd->link_enabled = 0; set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) | PLS_OFFLINE); /* * Step 2: Shutdown LCB and 8051 * After shutdown, do not restore DC_CFG_RESET value. */ dc_shutdown(dd); }
linux-master
drivers/infiniband/hw/hfi1/chip.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015-2018 Intel Corporation. */ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ratelimit.h> #include <linux/fault-inject.h> #include "hfi.h" #include "trace.h" #include "debugfs.h" #include "device.h" #include "qp.h" #include "sdma.h" #include "fault.h" static struct dentry *hfi1_dbg_root; /* wrappers to enforce srcu in seq file */ ssize_t hfi1_seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dentry *d = file->f_path.dentry; ssize_t r; r = debugfs_file_get(d); if (unlikely(r)) return r; r = seq_read(file, buf, size, ppos); debugfs_file_put(d); return r; } loff_t hfi1_seq_lseek(struct file *file, loff_t offset, int whence) { struct dentry *d = file->f_path.dentry; loff_t r; r = debugfs_file_get(d); if (unlikely(r)) return r; r = seq_lseek(file, offset, whence); debugfs_file_put(d); return r; } #define private2dd(file) (file_inode(file)->i_private) #define private2ppd(file) (file_inode(file)->i_private) static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos) { struct hfi1_opcode_stats_perctx *opstats; if (*pos >= ARRAY_SIZE(opstats->stats)) return NULL; return pos; } static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct hfi1_opcode_stats_perctx *opstats; ++*pos; if (*pos >= ARRAY_SIZE(opstats->stats)) return NULL; return pos; } static void _opcode_stats_seq_stop(struct seq_file *s, void *v) { } static int opcode_stats_show(struct seq_file *s, u8 i, u64 packets, u64 bytes) { if (!packets && !bytes) return SEQ_SKIP; seq_printf(s, "%02x %llu/%llu\n", i, (unsigned long long)packets, (unsigned long long)bytes); return 0; } static int _opcode_stats_seq_show(struct seq_file *s, void *v) { loff_t *spos = v; loff_t i = *spos, j; u64 n_packets = 0, n_bytes = 0; struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); struct hfi1_ctxtdata *rcd; for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) { rcd = hfi1_rcd_get_by_index(dd, j); if (rcd) { n_packets += rcd->opstats->stats[i].n_packets; n_bytes += rcd->opstats->stats[i].n_bytes; } hfi1_rcd_put(rcd); } return opcode_stats_show(s, i, n_packets, n_bytes); } DEBUGFS_SEQ_FILE_OPS(opcode_stats); DEBUGFS_SEQ_FILE_OPEN(opcode_stats) DEBUGFS_FILE_OPS(opcode_stats); static void *_tx_opcode_stats_seq_start(struct seq_file *s, loff_t *pos) { return _opcode_stats_seq_start(s, pos); } static void *_tx_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) { return _opcode_stats_seq_next(s, v, pos); } static void _tx_opcode_stats_seq_stop(struct seq_file *s, void *v) { } static int _tx_opcode_stats_seq_show(struct seq_file *s, void *v) { loff_t *spos = v; loff_t i = *spos; int j; u64 n_packets = 0, n_bytes = 0; struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); for_each_possible_cpu(j) { struct hfi1_opcode_stats_perctx *s = per_cpu_ptr(dd->tx_opstats, j); n_packets += s->stats[i].n_packets; n_bytes += s->stats[i].n_bytes; } return opcode_stats_show(s, i, n_packets, n_bytes); } DEBUGFS_SEQ_FILE_OPS(tx_opcode_stats); DEBUGFS_SEQ_FILE_OPEN(tx_opcode_stats) DEBUGFS_FILE_OPS(tx_opcode_stats); static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos) { struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); if (!*pos) return SEQ_START_TOKEN; if 
(*pos >= dd->first_dyn_alloc_ctxt) return NULL; return pos; } static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); if (v == SEQ_START_TOKEN) return pos; ++*pos; if (*pos >= dd->first_dyn_alloc_ctxt) return NULL; return pos; } static void _ctx_stats_seq_stop(struct seq_file *s, void *v) { /* nothing allocated */ } static int _ctx_stats_seq_show(struct seq_file *s, void *v) { loff_t *spos; loff_t i, j; u64 n_packets = 0; struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); struct hfi1_ctxtdata *rcd; if (v == SEQ_START_TOKEN) { seq_puts(s, "Ctx:npkts\n"); return 0; } spos = v; i = *spos; rcd = hfi1_rcd_get_by_index_safe(dd, i); if (!rcd) return SEQ_SKIP; for (j = 0; j < ARRAY_SIZE(rcd->opstats->stats); j++) n_packets += rcd->opstats->stats[j].n_packets; hfi1_rcd_put(rcd); if (!n_packets) return SEQ_SKIP; seq_printf(s, " %llu:%llu\n", i, n_packets); return 0; } DEBUGFS_SEQ_FILE_OPS(ctx_stats); DEBUGFS_SEQ_FILE_OPEN(ctx_stats) DEBUGFS_FILE_OPS(ctx_stats); static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) __acquires(RCU) { struct rvt_qp_iter *iter; loff_t n = *pos; iter = rvt_qp_iter_init(s->private, 0, NULL); /* stop calls rcu_read_unlock */ rcu_read_lock(); if (!iter) return NULL; do { if (rvt_qp_iter_next(iter)) { kfree(iter); return NULL; } } while (n--); return iter; } static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, loff_t *pos) __must_hold(RCU) { struct rvt_qp_iter *iter = iter_ptr; (*pos)++; if (rvt_qp_iter_next(iter)) { kfree(iter); return NULL; } return iter; } static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) __releases(RCU) { rcu_read_unlock(); } static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) { struct rvt_qp_iter *iter = iter_ptr; if (!iter) return 0; qp_iter_print(s, iter); return 0; } DEBUGFS_SEQ_FILE_OPS(qp_stats); DEBUGFS_SEQ_FILE_OPEN(qp_stats) DEBUGFS_FILE_OPS(qp_stats); static void *_sdes_seq_start(struct seq_file *s, loff_t *pos) { struct hfi1_ibdev *ibd; struct hfi1_devdata *dd; ibd = (struct hfi1_ibdev *)s->private; dd = dd_from_dev(ibd); if (!dd->per_sdma || *pos >= dd->num_sdma) return NULL; return pos; } static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); ++*pos; if (!dd->per_sdma || *pos >= dd->num_sdma) return NULL; return pos; } static void _sdes_seq_stop(struct seq_file *s, void *v) { } static int _sdes_seq_show(struct seq_file *s, void *v) { struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); loff_t *spos = v; loff_t i = *spos; sdma_seqfile_dump_sde(s, &dd->per_sdma[i]); return 0; } DEBUGFS_SEQ_FILE_OPS(sdes); DEBUGFS_SEQ_FILE_OPEN(sdes) DEBUGFS_FILE_OPS(sdes); static void *_rcds_seq_start(struct seq_file *s, loff_t *pos) { struct hfi1_ibdev *ibd; struct hfi1_devdata *dd; ibd = (struct hfi1_ibdev *)s->private; dd = dd_from_dev(ibd); if (!dd->rcd || *pos >= dd->n_krcv_queues) return NULL; return pos; } static void *_rcds_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); ++*pos; if (!dd->rcd || *pos >= dd->num_rcv_contexts) return NULL; return pos; } static void _rcds_seq_stop(struct seq_file *s, void *v) { } static int _rcds_seq_show(struct seq_file *s, 
void *v) { struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); struct hfi1_ctxtdata *rcd; loff_t *spos = v; loff_t i = *spos; rcd = hfi1_rcd_get_by_index_safe(dd, i); if (rcd) seqfile_dump_rcd(s, rcd); hfi1_rcd_put(rcd); return 0; } DEBUGFS_SEQ_FILE_OPS(rcds); DEBUGFS_SEQ_FILE_OPEN(rcds) DEBUGFS_FILE_OPS(rcds); static void *_pios_seq_start(struct seq_file *s, loff_t *pos) { struct hfi1_ibdev *ibd; struct hfi1_devdata *dd; ibd = (struct hfi1_ibdev *)s->private; dd = dd_from_dev(ibd); if (!dd->send_contexts || *pos >= dd->num_send_contexts) return NULL; return pos; } static void *_pios_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); ++*pos; if (!dd->send_contexts || *pos >= dd->num_send_contexts) return NULL; return pos; } static void _pios_seq_stop(struct seq_file *s, void *v) { } static int _pios_seq_show(struct seq_file *s, void *v) { struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); struct send_context_info *sci; loff_t *spos = v; loff_t i = *spos; unsigned long flags; spin_lock_irqsave(&dd->sc_lock, flags); sci = &dd->send_contexts[i]; if (sci && sci->type != SC_USER && sci->allocated && sci->sc) seqfile_dump_sci(s, i, sci); spin_unlock_irqrestore(&dd->sc_lock, flags); return 0; } DEBUGFS_SEQ_FILE_OPS(pios); DEBUGFS_SEQ_FILE_OPEN(pios) DEBUGFS_FILE_OPS(pios); /* read the per-device counters */ static ssize_t dev_counters_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u64 *counters; size_t avail; struct hfi1_devdata *dd; ssize_t rval; dd = private2dd(file); avail = hfi1_read_cntrs(dd, NULL, &counters); rval = simple_read_from_buffer(buf, count, ppos, counters, avail); return rval; } /* read the per-device counters */ static ssize_t dev_names_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *names; size_t avail; struct hfi1_devdata *dd; ssize_t rval; dd = private2dd(file); avail = hfi1_read_cntrs(dd, &names, NULL); rval = simple_read_from_buffer(buf, count, ppos, names, avail); return rval; } struct counter_info { char *name; const struct file_operations ops; }; /* * Could use file_inode(file)->i_ino to figure out which file, * instead of separate routine for each, but for now, this works... */ /* read the per-port names (same for each port) */ static ssize_t portnames_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *names; size_t avail; struct hfi1_devdata *dd; ssize_t rval; dd = private2dd(file); avail = hfi1_read_portcntrs(dd->pport, &names, NULL); rval = simple_read_from_buffer(buf, count, ppos, names, avail); return rval; } /* read the per-port counters */ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u64 *counters; size_t avail; struct hfi1_pportdata *ppd; ssize_t rval; ppd = private2ppd(file); avail = hfi1_read_portcntrs(ppd, NULL, &counters); rval = simple_read_from_buffer(buf, count, ppos, counters, avail); return rval; } static void check_dyn_flag(u64 scratch0, char *p, int size, int *used, int this_hfi, int hfi, u32 flag, const char *what) { u32 mask; mask = flag << (hfi ? CR_DYN_SHIFT : 0); if (scratch0 & mask) { *used += scnprintf(p + *used, size - *used, " 0x%08x - HFI%d %s in use, %s device\n", mask, hfi, what, this_hfi == hfi ? 
"this" : "other"); } } static ssize_t asic_flags_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hfi1_pportdata *ppd; struct hfi1_devdata *dd; u64 scratch0; char *tmp; int ret = 0; int size; int used; int i; ppd = private2ppd(file); dd = ppd->dd; size = PAGE_SIZE; used = 0; tmp = kmalloc(size, GFP_KERNEL); if (!tmp) return -ENOMEM; scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); used += scnprintf(tmp + used, size - used, "Resource flags: 0x%016llx\n", scratch0); /* check permanent flag */ if (scratch0 & CR_THERM_INIT) { used += scnprintf(tmp + used, size - used, " 0x%08x - thermal monitoring initialized\n", (u32)CR_THERM_INIT); } /* check each dynamic flag on each HFI */ for (i = 0; i < 2; i++) { check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i, CR_SBUS, "SBus"); check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i, CR_EPROM, "EPROM"); check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i, CR_I2C1, "i2c chain 1"); check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i, CR_I2C2, "i2c chain 2"); } used += scnprintf(tmp + used, size - used, "Write bits to clear\n"); ret = simple_read_from_buffer(buf, count, ppos, tmp, used); kfree(tmp); return ret; } static ssize_t asic_flags_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct hfi1_pportdata *ppd; struct hfi1_devdata *dd; char *buff; int ret; unsigned long long value; u64 scratch0; u64 clear; ppd = private2ppd(file); dd = ppd->dd; /* zero terminate and read the expected integer */ buff = memdup_user_nul(buf, count); if (IS_ERR(buff)) return PTR_ERR(buff); ret = kstrtoull(buff, 0, &value); if (ret) goto do_free; clear = value; /* obtain exclusive access */ mutex_lock(&dd->asic_data->asic_resource_mutex); acquire_hw_mutex(dd); scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); scratch0 &= ~clear; write_csr(dd, ASIC_CFG_SCRATCH, scratch0); /* force write to be visible to other HFI on another OS */ (void)read_csr(dd, ASIC_CFG_SCRATCH); release_hw_mutex(dd); mutex_unlock(&dd->asic_data->asic_resource_mutex); /* return the number of bytes written */ ret = count; do_free: kfree(buff); return ret; } /* read the dc8051 memory */ static ssize_t dc8051_memory_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hfi1_pportdata *ppd = private2ppd(file); ssize_t rval; void *tmp; loff_t start, end; /* the checks below expect the position to be positive */ if (*ppos < 0) return -EINVAL; tmp = kzalloc(DC8051_DATA_MEM_SIZE, GFP_KERNEL); if (!tmp) return -ENOMEM; /* * Fill in the requested portion of the temporary buffer from the * 8051 memory. The 8051 memory read is done in terms of 8 bytes. * Adjust start and end to fit. Skip reading anything if out of * range. 
*/ start = *ppos & ~0x7; /* round down */ if (start < DC8051_DATA_MEM_SIZE) { end = (*ppos + count + 7) & ~0x7; /* round up */ if (end > DC8051_DATA_MEM_SIZE) end = DC8051_DATA_MEM_SIZE; rval = read_8051_data(ppd->dd, start, end - start, (u64 *)(tmp + start)); if (rval) goto done; } rval = simple_read_from_buffer(buf, count, ppos, tmp, DC8051_DATA_MEM_SIZE); done: kfree(tmp); return rval; } static ssize_t debugfs_lcb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hfi1_pportdata *ppd = private2ppd(file); struct hfi1_devdata *dd = ppd->dd; unsigned long total, csr_off; u64 data; if (*ppos < 0) return -EINVAL; /* only read 8 byte quantities */ if ((count % 8) != 0) return -EINVAL; /* offset must be 8-byte aligned */ if ((*ppos % 8) != 0) return -EINVAL; /* do nothing if out of range or zero count */ if (*ppos >= (LCB_END - LCB_START) || !count) return 0; /* reduce count if needed */ if (*ppos + count > LCB_END - LCB_START) count = (LCB_END - LCB_START) - *ppos; csr_off = LCB_START + *ppos; for (total = 0; total < count; total += 8, csr_off += 8) { if (read_lcb_csr(dd, csr_off, (u64 *)&data)) break; /* failed */ if (put_user(data, (unsigned long __user *)(buf + total))) break; } *ppos += total; return total; } static ssize_t debugfs_lcb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct hfi1_pportdata *ppd = private2ppd(file); struct hfi1_devdata *dd = ppd->dd; unsigned long total, csr_off, data; if (*ppos < 0) return -EINVAL; /* only write 8 byte quantities */ if ((count % 8) != 0) return -EINVAL; /* offset must be 8-byte aligned */ if ((*ppos % 8) != 0) return -EINVAL; /* do nothing if out of range or zero count */ if (*ppos >= (LCB_END - LCB_START) || !count) return 0; /* reduce count if needed */ if (*ppos + count > LCB_END - LCB_START) count = (LCB_END - LCB_START) - *ppos; csr_off = LCB_START + *ppos; for (total = 0; total < count; total += 8, csr_off += 8) { if (get_user(data, (unsigned long __user *)(buf + total))) break; if (write_lcb_csr(dd, csr_off, data)) break; /* failed */ } *ppos += total; return total; } /* * read the per-port QSFP data for ppd */ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hfi1_pportdata *ppd; char *tmp; int ret; ppd = private2ppd(file); tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!tmp) return -ENOMEM; ret = qsfp_dump(ppd, tmp, PAGE_SIZE); if (ret > 0) ret = simple_read_from_buffer(buf, count, ppos, tmp, ret); kfree(tmp); return ret; } /* Do an i2c write operation on the chain for the given HFI. */ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos, u32 target) { struct hfi1_pportdata *ppd; char *buff; int ret; int i2c_addr; int offset; int total_written; ppd = private2ppd(file); /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ i2c_addr = (*ppos >> 16) & 0xffff; offset = *ppos & 0xffff; /* explicitly reject invalid address 0 to catch cp and cat */ if (i2c_addr == 0) return -EINVAL; buff = memdup_user(buf, count); if (IS_ERR(buff)) return PTR_ERR(buff); total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count); if (total_written < 0) { ret = total_written; goto _free; } *ppos += total_written; ret = total_written; _free: kfree(buff); return ret; } /* Do an i2c write operation on chain for HFI 0. 
*/ static ssize_t i2c1_debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return __i2c_debugfs_write(file, buf, count, ppos, 0); } /* Do an i2c write operation on chain for HFI 1. */ static ssize_t i2c2_debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return __i2c_debugfs_write(file, buf, count, ppos, 1); } /* Do an i2c read operation on the chain for the given HFI. */ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos, u32 target) { struct hfi1_pportdata *ppd; char *buff; int ret; int i2c_addr; int offset; int total_read; ppd = private2ppd(file); /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ i2c_addr = (*ppos >> 16) & 0xffff; offset = *ppos & 0xffff; /* explicitly reject invalid address 0 to catch cp and cat */ if (i2c_addr == 0) return -EINVAL; buff = kmalloc(count, GFP_KERNEL); if (!buff) return -ENOMEM; total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count); if (total_read < 0) { ret = total_read; goto _free; } *ppos += total_read; ret = copy_to_user(buf, buff, total_read); if (ret > 0) { ret = -EFAULT; goto _free; } ret = total_read; _free: kfree(buff); return ret; } /* Do an i2c read operation on chain for HFI 0. */ static ssize_t i2c1_debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return __i2c_debugfs_read(file, buf, count, ppos, 0); } /* Do an i2c read operation on chain for HFI 1. */ static ssize_t i2c2_debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return __i2c_debugfs_read(file, buf, count, ppos, 1); } /* Do a QSFP write operation on the i2c chain for the given HFI. */ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos, u32 target) { struct hfi1_pportdata *ppd; char *buff; int ret; int total_written; if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */ return -EINVAL; ppd = private2ppd(file); buff = memdup_user(buf, count); if (IS_ERR(buff)) return PTR_ERR(buff); total_written = qsfp_write(ppd, target, *ppos, buff, count); if (total_written < 0) { ret = total_written; goto _free; } *ppos += total_written; ret = total_written; _free: kfree(buff); return ret; } /* Do a QSFP write operation on i2c chain for HFI 0. */ static ssize_t qsfp1_debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return __qsfp_debugfs_write(file, buf, count, ppos, 0); } /* Do a QSFP write operation on i2c chain for HFI 1. */ static ssize_t qsfp2_debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return __qsfp_debugfs_write(file, buf, count, ppos, 1); } /* Do a QSFP read operation on the i2c chain for the given HFI. */ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos, u32 target) { struct hfi1_pportdata *ppd; char *buff; int ret; int total_read; if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */ ret = -EINVAL; goto _return; } ppd = private2ppd(file); buff = kmalloc(count, GFP_KERNEL); if (!buff) { ret = -ENOMEM; goto _return; } total_read = qsfp_read(ppd, target, *ppos, buff, count); if (total_read < 0) { ret = total_read; goto _free; } *ppos += total_read; ret = copy_to_user(buf, buff, total_read); if (ret > 0) { ret = -EFAULT; goto _free; } ret = total_read; _free: kfree(buff); _return: return ret; } /* Do a QSFP read operation on i2c chain for HFI 0. 
*/ static ssize_t qsfp1_debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return __qsfp_debugfs_read(file, buf, count, ppos, 0); } /* Do a QSFP read operation on i2c chain for HFI 1. */ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return __qsfp_debugfs_read(file, buf, count, ppos, 1); } static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; ppd = private2ppd(fp); return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int i2c1_debugfs_open(struct inode *in, struct file *fp) { return __i2c_debugfs_open(in, fp, 0); } static int i2c2_debugfs_open(struct inode *in, struct file *fp) { return __i2c_debugfs_open(in, fp, 1); } static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); return 0; } static int i2c1_debugfs_release(struct inode *in, struct file *fp) { return __i2c_debugfs_release(in, fp, 0); } static int i2c2_debugfs_release(struct inode *in, struct file *fp) { return __i2c_debugfs_release(in, fp, 1); } static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; ppd = private2ppd(fp); return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int qsfp1_debugfs_open(struct inode *in, struct file *fp) { return __qsfp_debugfs_open(in, fp, 0); } static int qsfp2_debugfs_open(struct inode *in, struct file *fp) { return __qsfp_debugfs_open(in, fp, 1); } static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); return 0; } static int qsfp1_debugfs_release(struct inode *in, struct file *fp) { return __qsfp_debugfs_release(in, fp, 0); } static int qsfp2_debugfs_release(struct inode *in, struct file *fp) { return __qsfp_debugfs_release(in, fp, 1); } #define EXPROM_WRITE_ENABLE BIT_ULL(14) static bool exprom_wp_disabled; static int exprom_wp_set(struct hfi1_devdata *dd, bool disable) { u64 gpio_val = 0; if (disable) { gpio_val = EXPROM_WRITE_ENABLE; exprom_wp_disabled = true; dd_dev_info(dd, "Disable Expansion ROM Write Protection\n"); } else { exprom_wp_disabled = false; dd_dev_info(dd, "Enable Expansion ROM Write Protection\n"); } write_csr(dd, ASIC_GPIO_OUT, gpio_val); write_csr(dd, ASIC_GPIO_OE, gpio_val); return 0; } static ssize_t exprom_wp_debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } static ssize_t exprom_wp_debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct hfi1_pportdata *ppd = private2ppd(file); char cdata; if (count != 1) return -EINVAL; if (get_user(cdata, buf)) return -EFAULT; if (cdata == '0') exprom_wp_set(ppd->dd, false); else if (cdata == '1') exprom_wp_set(ppd->dd, true); else return -EINVAL; return 1; } static unsigned long exprom_in_use; static int exprom_wp_debugfs_open(struct inode *in, struct file *fp) { if (test_and_set_bit(0, &exprom_in_use)) return -EBUSY; return 0; } static int exprom_wp_debugfs_release(struct inode *in, struct file *fp) { struct hfi1_pportdata *ppd = private2ppd(fp); if (exprom_wp_disabled) exprom_wp_set(ppd->dd, false); clear_bit(0, &exprom_in_use); return 0; } #define DEBUGFS_OPS(nm, readroutine, writeroutine) \ { \ .name = nm, \ .ops = { \ .owner = THIS_MODULE, \ .read = readroutine, \ .write = 
writeroutine, \ .llseek = generic_file_llseek, \ }, \ } #define DEBUGFS_XOPS(nm, readf, writef, openf, releasef) \ { \ .name = nm, \ .ops = { \ .owner = THIS_MODULE, \ .read = readf, \ .write = writef, \ .llseek = generic_file_llseek, \ .open = openf, \ .release = releasef \ }, \ } static const struct counter_info cntr_ops[] = { DEBUGFS_OPS("counter_names", dev_names_read, NULL), DEBUGFS_OPS("counters", dev_counters_read, NULL), DEBUGFS_OPS("portcounter_names", portnames_read, NULL), }; static const struct counter_info port_cntr_ops[] = { DEBUGFS_OPS("port%dcounters", portcntrs_debugfs_read, NULL), DEBUGFS_XOPS("i2c1", i2c1_debugfs_read, i2c1_debugfs_write, i2c1_debugfs_open, i2c1_debugfs_release), DEBUGFS_XOPS("i2c2", i2c2_debugfs_read, i2c2_debugfs_write, i2c2_debugfs_open, i2c2_debugfs_release), DEBUGFS_OPS("qsfp_dump%d", qsfp_debugfs_dump, NULL), DEBUGFS_XOPS("qsfp1", qsfp1_debugfs_read, qsfp1_debugfs_write, qsfp1_debugfs_open, qsfp1_debugfs_release), DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write, qsfp2_debugfs_open, qsfp2_debugfs_release), DEBUGFS_XOPS("exprom_wp", exprom_wp_debugfs_read, exprom_wp_debugfs_write, exprom_wp_debugfs_open, exprom_wp_debugfs_release), DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write), DEBUGFS_OPS("dc8051_memory", dc8051_memory_read, NULL), DEBUGFS_OPS("lcb", debugfs_lcb_read, debugfs_lcb_write), }; static void *_sdma_cpu_list_seq_start(struct seq_file *s, loff_t *pos) { if (*pos >= num_online_cpus()) return NULL; return pos; } static void *_sdma_cpu_list_seq_next(struct seq_file *s, void *v, loff_t *pos) { ++*pos; if (*pos >= num_online_cpus()) return NULL; return pos; } static void _sdma_cpu_list_seq_stop(struct seq_file *s, void *v) { /* nothing allocated */ } static int _sdma_cpu_list_seq_show(struct seq_file *s, void *v) { struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); loff_t *spos = v; loff_t i = *spos; sdma_seqfile_dump_cpu_list(s, dd, (unsigned long)i); return 0; } DEBUGFS_SEQ_FILE_OPS(sdma_cpu_list); DEBUGFS_SEQ_FILE_OPEN(sdma_cpu_list) DEBUGFS_FILE_OPS(sdma_cpu_list); void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd) { char name[sizeof("port0counters") + 1]; char link[10]; struct hfi1_devdata *dd = dd_from_dev(ibd); struct hfi1_pportdata *ppd; struct dentry *root; int unit = dd->unit; int i, j; if (!hfi1_dbg_root) return; snprintf(name, sizeof(name), "%s_%d", class_name(), unit); snprintf(link, sizeof(link), "%d", unit); root = debugfs_create_dir(name, hfi1_dbg_root); ibd->hfi1_ibdev_dbg = root; ibd->hfi1_ibdev_link = debugfs_create_symlink(link, hfi1_dbg_root, name); debugfs_create_file("opcode_stats", 0444, root, ibd, &_opcode_stats_file_ops); debugfs_create_file("tx_opcode_stats", 0444, root, ibd, &_tx_opcode_stats_file_ops); debugfs_create_file("ctx_stats", 0444, root, ibd, &_ctx_stats_file_ops); debugfs_create_file("qp_stats", 0444, root, ibd, &_qp_stats_file_ops); debugfs_create_file("sdes", 0444, root, ibd, &_sdes_file_ops); debugfs_create_file("rcds", 0444, root, ibd, &_rcds_file_ops); debugfs_create_file("pios", 0444, root, ibd, &_pios_file_ops); debugfs_create_file("sdma_cpu_list", 0444, root, ibd, &_sdma_cpu_list_file_ops); /* dev counter files */ for (i = 0; i < ARRAY_SIZE(cntr_ops); i++) debugfs_create_file(cntr_ops[i].name, 0444, root, dd, &cntr_ops[i].ops); /* per port files */ for (ppd = dd->pport, j = 0; j < dd->num_pports; j++, ppd++) for (i = 0; i < ARRAY_SIZE(port_cntr_ops); i++) { snprintf(name, sizeof(name), port_cntr_ops[i].name, j + 1); 
debugfs_create_file(name, !port_cntr_ops[i].ops.write ? S_IRUGO : S_IRUGO | S_IWUSR, root, ppd, &port_cntr_ops[i].ops); } hfi1_fault_init_debugfs(ibd); } void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd) { if (!hfi1_dbg_root) goto out; hfi1_fault_exit_debugfs(ibd); debugfs_remove(ibd->hfi1_ibdev_link); debugfs_remove_recursive(ibd->hfi1_ibdev_dbg); out: ibd->hfi1_ibdev_dbg = NULL; } /* * driver stats field names, one line per stat, single string. Used by * programs like hfistats to print the stats in a way which works for * different versions of drivers, without changing program source. * if hfi1_ib_stats changes, this needs to change. Names need to be * 12 chars or less (w/o newline), for proper display by hfistats utility. */ static const char * const hfi1_statnames[] = { /* must be element 0*/ "KernIntr", "ErrorIntr", "Tx_Errs", "Rcv_Errs", "H/W_Errs", "NoPIOBufs", "CtxtsOpen", "RcvLen_Errs", "EgrBufFull", "EgrHdrFull" }; static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos) { if (*pos >= ARRAY_SIZE(hfi1_statnames)) return NULL; return pos; } static void *_driver_stats_names_seq_next( struct seq_file *s, void *v, loff_t *pos) { ++*pos; if (*pos >= ARRAY_SIZE(hfi1_statnames)) return NULL; return pos; } static void _driver_stats_names_seq_stop(struct seq_file *s, void *v) { } static int _driver_stats_names_seq_show(struct seq_file *s, void *v) { loff_t *spos = v; seq_printf(s, "%s\n", hfi1_statnames[*spos]); return 0; } DEBUGFS_SEQ_FILE_OPS(driver_stats_names); DEBUGFS_SEQ_FILE_OPEN(driver_stats_names) DEBUGFS_FILE_OPS(driver_stats_names); static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos) { if (*pos >= ARRAY_SIZE(hfi1_statnames)) return NULL; return pos; } static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) { ++*pos; if (*pos >= ARRAY_SIZE(hfi1_statnames)) return NULL; return pos; } static void _driver_stats_seq_stop(struct seq_file *s, void *v) { } static void hfi1_sps_show_ints(struct seq_file *s) { unsigned long index, flags; struct hfi1_devdata *dd; u64 sps_ints = 0; xa_lock_irqsave(&hfi1_dev_table, flags); xa_for_each(&hfi1_dev_table, index, dd) { sps_ints += get_all_cpu_total(dd->int_counter); } xa_unlock_irqrestore(&hfi1_dev_table, flags); seq_write(s, &sps_ints, sizeof(u64)); } static int _driver_stats_seq_show(struct seq_file *s, void *v) { loff_t *spos = v; u64 *stats = (u64 *)&hfi1_stats; /* special case for interrupts */ if (*spos == 0) hfi1_sps_show_ints(s); else seq_write(s, stats + *spos, sizeof(u64)); return 0; } DEBUGFS_SEQ_FILE_OPS(driver_stats); DEBUGFS_SEQ_FILE_OPEN(driver_stats) DEBUGFS_FILE_OPS(driver_stats); void hfi1_dbg_init(void) { hfi1_dbg_root = debugfs_create_dir(DRIVER_NAME, NULL); debugfs_create_file("driver_stats_names", 0444, hfi1_dbg_root, NULL, &_driver_stats_names_file_ops); debugfs_create_file("driver_stats", 0444, hfi1_dbg_root, NULL, &_driver_stats_file_ops); } void hfi1_dbg_exit(void) { debugfs_remove_recursive(hfi1_dbg_root); hfi1_dbg_root = NULL; }
linux-master
drivers/infiniband/hw/hfi1/debugfs.c
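/*
 * Illustrative sketch of the 24-bit PSN arithmetic the RC code below leans
 * on via mask_psn(), cmp_psn() and delta_psn(): sign-extending the 24-bit
 * difference lets ordering comparisons survive wrap-around at 2^24.  The
 * DEMO_* constants and demo_* helpers are stand-ins written for this
 * sketch, not the hfi1 definitions, and rely on the kernel's arithmetic
 * right-shift behaviour for signed values.
 */
#include <linux/types.h>

#define DEMO_PSN_MASK	0x00FFFFFF	/* PSNs occupy 24 bits on the wire */
#define DEMO_PSN_SHIFT	8		/* push the 24-bit sign up to bit 31 */

/* Keep only the 24 low-order bits before a PSN is placed into a BTH. */
static inline u32 demo_mask_psn(u32 a)
{
	return a & DEMO_PSN_MASK;
}

/*
 * Circular compare: negative when a precedes b, zero when equal, positive
 * when a follows b, modulo 2^24.  E.g. demo_cmp_psn(0x000001, 0xFFFFFE) > 0
 * because PSN 1 follows PSN 0xFFFFFE across the wrap.
 */
static inline int demo_cmp_psn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << DEMO_PSN_SHIFT;
}

/* Signed distance from b to a, again modulo 2^24. */
static inline int demo_delta_psn(u32 a, u32 b)
{
	return (((int)a - (int)b) << DEMO_PSN_SHIFT) >> DEMO_PSN_SHIFT;
}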
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2018 Intel Corporation. */ #include <linux/io.h> #include <rdma/rdma_vt.h> #include <rdma/rdmavt_qp.h> #include "hfi.h" #include "qp.h" #include "rc.h" #include "verbs_txreq.h" #include "trace.h" struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev, u8 *prev_ack, bool *scheduled) __must_hold(&qp->s_lock) { struct rvt_ack_entry *e = NULL; u8 i, p; bool s = true; for (i = qp->r_head_ack_queue; ; i = p) { if (i == qp->s_tail_ack_queue) s = false; if (i) p = i - 1; else p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); if (p == qp->r_head_ack_queue) { e = NULL; break; } e = &qp->s_ack_queue[p]; if (!e->opcode) { e = NULL; break; } if (cmp_psn(psn, e->psn) >= 0) { if (p == qp->s_tail_ack_queue && cmp_psn(psn, e->lpsn) <= 0) s = false; break; } } if (prev) *prev = p; if (prev_ack) *prev_ack = i; if (scheduled) *scheduled = s; return e; } /** * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read) * @dev: the device for this QP * @qp: a pointer to the QP * @ohdr: a pointer to the IB header being constructed * @ps: the xmit packet state * * Return 1 if constructed; otherwise, return 0. * Note that we are in the responder's side of the QP context. * Note the QP s_lock must be held. */ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, struct ib_other_headers *ohdr, struct hfi1_pkt_state *ps) { struct rvt_ack_entry *e; u32 hwords, hdrlen; u32 len = 0; u32 bth0 = 0, bth2 = 0; u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT); int middle = 0; u32 pmtu = qp->pmtu; struct hfi1_qp_priv *qpriv = qp->priv; bool last_pkt; u32 delta; u8 next = qp->s_tail_ack_queue; struct tid_rdma_request *req; trace_hfi1_rsp_make_rc_ack(qp, 0); lockdep_assert_held(&qp->s_lock); /* Don't send an ACK if we aren't supposed to. */ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto bail; if (qpriv->hdr_type == HFI1_PKT_TYPE_9B) /* header size in 32-bit words LRH+BTH = (8+12)/4. */ hwords = 5; else /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */ hwords = 7; switch (qp->s_ack_state) { case OP(RDMA_READ_RESPONSE_LAST): case OP(RDMA_READ_RESPONSE_ONLY): e = &qp->s_ack_queue[qp->s_tail_ack_queue]; release_rdma_sge_mr(e); fallthrough; case OP(ATOMIC_ACKNOWLEDGE): /* * We can increment the tail pointer now that the last * response has been sent instead of only being * constructed. */ if (++next > rvt_size_atomic(&dev->rdi)) next = 0; /* * Only advance the s_acked_ack_queue pointer if there * have been no TID RDMA requests. */ e = &qp->s_ack_queue[qp->s_tail_ack_queue]; if (e->opcode != TID_OP(WRITE_REQ) && qp->s_acked_ack_queue == qp->s_tail_ack_queue) qp->s_acked_ack_queue = next; qp->s_tail_ack_queue = next; trace_hfi1_rsp_make_rc_ack(qp, e->psn); fallthrough; case OP(SEND_ONLY): case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. */ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { if (qp->s_flags & RVT_S_ACK_PENDING) goto normal; goto bail; } e = &qp->s_ack_queue[qp->s_tail_ack_queue]; /* Check for tid write fence */ if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) || hfi1_tid_rdma_ack_interlock(qp, e)) { iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB); goto bail; } if (e->opcode == OP(RDMA_READ_REQUEST)) { /* * If a RDMA read response is being resent and * we haven't seen the duplicate request yet, * then stop sending the remaining responses the * responder has seen until the requester re-sends it. 
*/ len = e->rdma_sge.sge_length; if (len && !e->rdma_sge.mr) { if (qp->s_acked_ack_queue == qp->s_tail_ack_queue) qp->s_acked_ack_queue = qp->r_head_ack_queue; qp->s_tail_ack_queue = qp->r_head_ack_queue; goto bail; } /* Copy SGE state in case we need to resend */ ps->s_txreq->mr = e->rdma_sge.mr; if (ps->s_txreq->mr) rvt_get_mr(ps->s_txreq->mr); qp->s_ack_rdma_sge.sge = e->rdma_sge; qp->s_ack_rdma_sge.num_sge = 1; ps->s_txreq->ss = &qp->s_ack_rdma_sge; if (len > pmtu) { len = pmtu; qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); } else { qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); e->sent = 1; } ohdr->u.aeth = rvt_compute_aeth(qp); hwords++; qp->s_ack_rdma_psn = e->psn; bth2 = mask_psn(qp->s_ack_rdma_psn++); } else if (e->opcode == TID_OP(WRITE_REQ)) { /* * If a TID RDMA WRITE RESP is being resent, we have to * wait for the actual request. All requests that are to * be resent will have their state set to * TID_REQUEST_RESEND. When the new request arrives, the * state will be changed to TID_REQUEST_RESEND_ACTIVE. */ req = ack_to_tid_req(e); if (req->state == TID_REQUEST_RESEND || req->state == TID_REQUEST_INIT_RESEND) goto bail; qp->s_ack_state = TID_OP(WRITE_RESP); qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg); goto write_resp; } else if (e->opcode == TID_OP(READ_REQ)) { /* * If a TID RDMA read response is being resent and * we haven't seen the duplicate request yet, * then stop sending the remaining responses the * responder has seen until the requester re-sends it. */ len = e->rdma_sge.sge_length; if (len && !e->rdma_sge.mr) { if (qp->s_acked_ack_queue == qp->s_tail_ack_queue) qp->s_acked_ack_queue = qp->r_head_ack_queue; qp->s_tail_ack_queue = qp->r_head_ack_queue; goto bail; } /* Copy SGE state in case we need to resend */ ps->s_txreq->mr = e->rdma_sge.mr; if (ps->s_txreq->mr) rvt_get_mr(ps->s_txreq->mr); qp->s_ack_rdma_sge.sge = e->rdma_sge; qp->s_ack_rdma_sge.num_sge = 1; qp->s_ack_state = TID_OP(READ_RESP); goto read_resp; } else { /* COMPARE_SWAP or FETCH_ADD */ ps->s_txreq->ss = NULL; len = 0; qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); ohdr->u.at.aeth = rvt_compute_aeth(qp); ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth); hwords += sizeof(ohdr->u.at) / sizeof(u32); bth2 = mask_psn(e->psn); e->sent = 1; } trace_hfi1_tid_write_rsp_make_rc_ack(qp); bth0 = qp->s_ack_state << 24; break; case OP(RDMA_READ_RESPONSE_FIRST): qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); fallthrough; case OP(RDMA_READ_RESPONSE_MIDDLE): ps->s_txreq->ss = &qp->s_ack_rdma_sge; ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr; if (ps->s_txreq->mr) rvt_get_mr(ps->s_txreq->mr); len = qp->s_ack_rdma_sge.sge.sge_length; if (len > pmtu) { len = pmtu; middle = HFI1_CAP_IS_KSET(SDMA_AHG); } else { ohdr->u.aeth = rvt_compute_aeth(qp); hwords++; qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); e = &qp->s_ack_queue[qp->s_tail_ack_queue]; e->sent = 1; } bth0 = qp->s_ack_state << 24; bth2 = mask_psn(qp->s_ack_rdma_psn++); break; case TID_OP(WRITE_RESP): write_resp: /* * 1. Check if RVT_S_ACK_PENDING is set. If yes, * goto normal. * 2. Attempt to allocate TID resources. * 3. Remove RVT_S_RESP_PENDING flags from s_flags * 4. If resources not available: * 4.1 Set RVT_S_WAIT_TID_SPACE * 4.2 Queue QP on RCD TID queue * 4.3 Put QP on iowait list. * 4.4 Build IB RNR NAK with appropriate timeout value * 4.5 Return indication progress made. * 5. If resources are available: * 5.1 Program HW flow CSRs * 5.2 Build TID RDMA WRITE RESP packet * 5.3 If more resources needed, do 2.1 - 2.3. 
* 5.4 Wake up next QP on RCD TID queue. * 5.5 Return indication progress made. */ e = &qp->s_ack_queue[qp->s_tail_ack_queue]; req = ack_to_tid_req(e); /* * Send scheduled RNR NAK's. RNR NAK's need to be sent at * segment boundaries, not at request boundaries. Don't change * s_ack_state because we are still in the middle of a request */ if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND && qp->s_tail_ack_queue == qpriv->r_tid_alloc && req->cur_seg == req->alloc_seg) { qpriv->rnr_nak_state = TID_RNR_NAK_SENT; goto normal_no_state; } bth2 = mask_psn(qp->s_ack_rdma_psn); hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1, bth2, &len, &ps->s_txreq->ss); if (!hdrlen) return 0; hwords += hdrlen; bth0 = qp->s_ack_state << 24; qp->s_ack_rdma_psn++; trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn, e->lpsn, req); if (req->cur_seg != req->total_segs) break; e->sent = 1; /* Do not free e->rdma_sge until all data are received */ qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); break; case TID_OP(READ_RESP): read_resp: e = &qp->s_ack_queue[qp->s_tail_ack_queue]; ps->s_txreq->ss = &qp->s_ack_rdma_sge; delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0, &bth1, &bth2, &len, &last_pkt); if (delta == 0) goto error_qp; hwords += delta; if (last_pkt) { e->sent = 1; /* * Increment qp->s_tail_ack_queue through s_ack_state * transition. */ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); } break; case TID_OP(READ_REQ): goto bail; default: normal: /* * Send a regular ACK. * Set the s_ack_state so we wait until after sending * the ACK before setting s_ack_state to ACKNOWLEDGE * (see above). */ qp->s_ack_state = OP(SEND_ONLY); normal_no_state: if (qp->s_nak_state) ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) | (qp->s_nak_state << IB_AETH_CREDIT_SHIFT)); else ohdr->u.aeth = rvt_compute_aeth(qp); hwords++; len = 0; bth0 = OP(ACKNOWLEDGE) << 24; bth2 = mask_psn(qp->s_ack_psn); qp->s_flags &= ~RVT_S_ACK_PENDING; ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP; ps->s_txreq->ss = NULL; } qp->s_rdma_ack_cnt++; ps->s_txreq->sde = qpriv->s_sde; ps->s_txreq->s_cur_size = len; ps->s_txreq->hdr_dwords = hwords; hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps); return 1; error_qp: spin_unlock_irqrestore(&qp->s_lock, ps->flags); spin_lock_irqsave(&qp->r_lock, ps->flags); spin_lock(&qp->s_lock); rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); spin_unlock(&qp->s_lock); spin_unlock_irqrestore(&qp->r_lock, ps->flags); spin_lock_irqsave(&qp->s_lock, ps->flags); bail: qp->s_ack_state = OP(ACKNOWLEDGE); /* * Ensure s_rdma_ack_cnt changes are committed prior to resetting * RVT_S_RESP_PENDING */ smp_wmb(); qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING | HFI1_S_AHG_VALID); return 0; } /** * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) * @qp: a pointer to the QP * @ps: the current packet state * * Assumes s_lock is held. * * Return 1 if constructed; otherwise, return 0. */ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); struct ib_other_headers *ohdr; struct rvt_sge_state *ss = NULL; struct rvt_swqe *wqe; struct hfi1_swqe_priv *wpriv; struct tid_rdma_request *req = NULL; /* header size in 32-bit words LRH+BTH = (8+12)/4. 
*/ u32 hwords = 5; u32 len = 0; u32 bth0 = 0, bth2 = 0; u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT); u32 pmtu = qp->pmtu; char newreq; int middle = 0; int delta; struct tid_rdma_flow *flow = NULL; struct tid_rdma_params *remote; trace_hfi1_sender_make_rc_req(qp); lockdep_assert_held(&qp->s_lock); ps->s_txreq = get_txreq(ps->dev, qp); if (!ps->s_txreq) goto bail_no_tx; if (priv->hdr_type == HFI1_PKT_TYPE_9B) { /* header size in 32-bit words LRH+BTH = (8+12)/4. */ hwords = 5; if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth; else ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth; } else { /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */ hwords = 7; if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr)))) ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth; else ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth; } /* Sending responses has higher priority over sending requests. */ if ((qp->s_flags & RVT_S_RESP_PENDING) && make_rc_ack(dev, qp, ohdr, ps)) return 1; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (iowait_sdma_pending(&priv->s_iowait)) { qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } clear_ahg(qp); wqe = rvt_get_swqe_ptr(qp, qp->s_last); hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); /* will get called again */ goto done_free_tx; } if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT)) goto bail; if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) { if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { qp->s_flags |= RVT_S_WAIT_PSN; goto bail; } qp->s_sending_psn = qp->s_psn; qp->s_sending_hpsn = qp->s_psn - 1; } /* Send a request. */ wqe = rvt_get_swqe_ptr(qp, qp->s_cur); check_s_state: switch (qp->s_state) { default: if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) goto bail; /* * Resend an old request or start a new one. * * We keep track of the current SWQE so that * we don't reset the "furthest progress" state * if we need to back up. */ newreq = 0; if (qp->s_cur == qp->s_tail) { /* Check if send work queue is empty. */ if (qp->s_tail == READ_ONCE(qp->s_head)) { clear_ahg(qp); goto bail; } /* * If a fence is requested, wait for previous * RDMA read and atomic operations to finish. * However, there is no need to guard against * TID RDMA READ after TID RDMA READ. */ if ((wqe->wr.send_flags & IB_SEND_FENCE) && qp->s_num_rd_atomic && (wqe->wr.opcode != IB_WR_TID_RDMA_READ || priv->pending_tid_r_segs < qp->s_num_rd_atomic)) { qp->s_flags |= RVT_S_WAIT_FENCE; goto bail; } /* * Local operations are processed immediately * after all prior requests have completed */ if (wqe->wr.opcode == IB_WR_REG_MR || wqe->wr.opcode == IB_WR_LOCAL_INV) { int local_ops = 0; int err = 0; if (qp->s_last != qp->s_cur) goto bail; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; if (++qp->s_tail == qp->s_size) qp->s_tail = 0; if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { err = rvt_invalidate_rkey( qp, wqe->wr.ex.invalidate_rkey); local_ops = 1; } rvt_send_complete(qp, wqe, err ? 
IB_WC_LOC_PROT_ERR : IB_WC_SUCCESS); if (local_ops) atomic_dec(&qp->local_ops_pending); goto done_free_tx; } newreq = 1; qp->s_psn = wqe->psn; } /* * Note that we have to be careful not to modify the * original work request since we may need to resend * it. */ len = wqe->length; ss = &qp->s_sge; bth2 = mask_psn(qp->s_psn); /* * Interlock between various IB requests and TID RDMA * if necessary. */ if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) || hfi1_tid_rdma_wqe_interlock(qp, wqe)) goto bail; switch (wqe->wr.opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: case IB_WR_SEND_WITH_INV: /* If no credit, return. */ if (!rvt_rc_credit_avail(qp, wqe)) goto bail; if (len > pmtu) { qp->s_state = OP(SEND_FIRST); len = pmtu; break; } if (wqe->wr.opcode == IB_WR_SEND) { qp->s_state = OP(SEND_ONLY); } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; } else { qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE); /* Invalidate rkey comes after the BTH */ ohdr->u.ieth = cpu_to_be32( wqe->wr.ex.invalidate_rkey); hwords += 1; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; bth2 |= IB_BTH_REQ_ACK; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; break; case IB_WR_RDMA_WRITE: if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; goto no_flow_control; case IB_WR_RDMA_WRITE_WITH_IMM: /* If no credit, return. */ if (!rvt_rc_credit_avail(qp, wqe)) goto bail; no_flow_control: put_ib_reth_vaddr( wqe->rdma_wr.remote_addr, &ohdr->u.rc.reth); ohdr->u.rc.reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); hwords += sizeof(struct ib_reth) / sizeof(u32); if (len > pmtu) { qp->s_state = OP(RDMA_WRITE_FIRST); len = pmtu; break; } if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { qp->s_state = OP(RDMA_WRITE_ONLY); } else { qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); /* Immediate data comes after RETH */ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; hwords += 1; if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; } bth2 |= IB_BTH_REQ_ACK; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; break; case IB_WR_TID_RDMA_WRITE: if (newreq) { /* * Limit the number of TID RDMA WRITE requests. */ if (atomic_read(&priv->n_tid_requests) >= HFI1_TID_RDMA_WRITE_CNT) goto bail; if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; } hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1, &bth2, &len); ss = NULL; if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) { priv->s_tid_cur = qp->s_cur; if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) { priv->s_tid_tail = qp->s_cur; priv->s_state = TID_OP(WRITE_RESP); } } else if (priv->s_tid_cur == priv->s_tid_head) { struct rvt_swqe *__w; struct tid_rdma_request *__r; __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur); __r = wqe_to_tid_req(__w); /* * The s_tid_cur pointer is advanced to s_cur if * any of the following conditions about the WQE * to which s_ti_cur currently points to are * satisfied: * 1. The request is not a TID RDMA WRITE * request, * 2. The request is in the INACTIVE or * COMPLETE states (TID RDMA READ requests * stay at INACTIVE and TID RDMA WRITE * transition to COMPLETE when done), * 3. The request is in the ACTIVE or SYNC * state and the number of completed * segments is equal to the total segment * count. * (If ACTIVE, the request is waiting for * ACKs. If SYNC, the request has not * received any responses because it's * waiting on a sync point.) 
*/ if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE || __r->state == TID_REQUEST_INACTIVE || __r->state == TID_REQUEST_COMPLETE || ((__r->state == TID_REQUEST_ACTIVE || __r->state == TID_REQUEST_SYNC) && __r->comp_seg == __r->total_segs)) { if (priv->s_tid_tail == priv->s_tid_cur && priv->s_state == TID_OP(WRITE_DATA_LAST)) { priv->s_tid_tail = qp->s_cur; priv->s_state = TID_OP(WRITE_RESP); } priv->s_tid_cur = qp->s_cur; } /* * A corner case: when the last TID RDMA WRITE * request was completed, s_tid_head, * s_tid_cur, and s_tid_tail all point to the * same location. Other requests are posted and * s_cur wraps around to the same location, * where a new TID RDMA WRITE is posted. In * this case, none of the indices need to be * updated. However, the priv->s_state should. */ if (priv->s_tid_tail == qp->s_cur && priv->s_state == TID_OP(WRITE_DATA_LAST)) priv->s_state = TID_OP(WRITE_RESP); } req = wqe_to_tid_req(wqe); if (newreq) { priv->s_tid_head = qp->s_cur; priv->pending_tid_w_resp += req->total_segs; atomic_inc(&priv->n_tid_requests); atomic_dec(&priv->n_requests); } else { req->state = TID_REQUEST_RESEND; req->comp_seg = delta_psn(bth2, wqe->psn); /* * Pull back any segments since we are going * to re-receive them. */ req->setup_head = req->clear_tail; priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1; } trace_hfi1_tid_write_sender_make_req(qp, newreq); trace_hfi1_tid_req_make_req_write(qp, newreq, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); if (++qp->s_cur == qp->s_size) qp->s_cur = 0; break; case IB_WR_RDMA_READ: /* * Don't allow more operations to be started * than the QP limits allow. */ if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; put_ib_reth_vaddr( wqe->rdma_wr.remote_addr, &ohdr->u.rc.reth); ohdr->u.rc.reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); qp->s_state = OP(RDMA_READ_REQUEST); hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); ss = NULL; len = 0; bth2 |= IB_BTH_REQ_ACK; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; break; case IB_WR_TID_RDMA_READ: trace_hfi1_tid_read_sender_make_req(qp, newreq); wpriv = wqe->priv; req = wqe_to_tid_req(wqe); trace_hfi1_tid_req_make_req_read(qp, newreq, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); delta = cmp_psn(qp->s_psn, wqe->psn); /* * Don't allow more operations to be started * than the QP limits allow. We could get here under * three conditions; (1) It's a new request; (2) We are * sending the second or later segment of a request, * but the qp->s_state is set to OP(RDMA_READ_REQUEST) * when the last segment of a previous request is * received just before this; (3) We are re-sending a * request. */ if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } if (newreq) { struct tid_rdma_flow *flow = &req->flows[req->setup_head]; /* * Set up s_sge as it is needed for TID * allocation. However, if the pages have been * walked and mapped, skip it. An earlier try * has failed to allocate the TID entries. 
*/ if (!flow->npagesets) { qp->s_sge.sge = wqe->sg_list[0]; qp->s_sge.sg_list = wqe->sg_list + 1; qp->s_sge.num_sge = wqe->wr.num_sge; qp->s_sge.total_len = wqe->length; qp->s_len = wqe->length; req->isge = 0; req->clear_tail = req->setup_head; req->flow_idx = req->setup_head; req->state = TID_REQUEST_ACTIVE; } } else if (delta == 0) { /* Re-send a request */ req->cur_seg = 0; req->comp_seg = 0; req->ack_pending = 0; req->flow_idx = req->clear_tail; req->state = TID_REQUEST_RESEND; } req->s_next_psn = qp->s_psn; /* Read one segment at a time */ len = min_t(u32, req->seg_len, wqe->length - req->seg_len * req->cur_seg); delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1, &bth2, &len); if (delta <= 0) { /* Wait for TID space */ goto bail; } if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; hwords += delta; ss = &wpriv->ss; /* Check if this is the last segment */ if (req->cur_seg >= req->total_segs && ++qp->s_cur == qp->s_size) qp->s_cur = 0; break; case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: /* * Don't allow more operations to be started * than the QP limits allow. */ if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; fallthrough; case IB_WR_OPFN: if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_OPFN) { qp->s_state = OP(COMPARE_SWAP); put_ib_ateth_swap(wqe->atomic_wr.swap, &ohdr->u.atomic_eth); put_ib_ateth_compare(wqe->atomic_wr.compare_add, &ohdr->u.atomic_eth); } else { qp->s_state = OP(FETCH_ADD); put_ib_ateth_swap(wqe->atomic_wr.compare_add, &ohdr->u.atomic_eth); put_ib_ateth_compare(0, &ohdr->u.atomic_eth); } put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr, &ohdr->u.atomic_eth); ohdr->u.atomic_eth.rkey = cpu_to_be32( wqe->atomic_wr.rkey); hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); ss = NULL; len = 0; bth2 |= IB_BTH_REQ_ACK; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; break; default: goto bail; } if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) { qp->s_sge.sge = wqe->sg_list[0]; qp->s_sge.sg_list = wqe->sg_list + 1; qp->s_sge.num_sge = wqe->wr.num_sge; qp->s_sge.total_len = wqe->length; qp->s_len = wqe->length; } if (newreq) { qp->s_tail++; if (qp->s_tail >= qp->s_size) qp->s_tail = 0; } if (wqe->wr.opcode == IB_WR_RDMA_READ || wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) qp->s_psn = wqe->lpsn + 1; else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) qp->s_psn = req->s_next_psn; else qp->s_psn++; break; case OP(RDMA_READ_RESPONSE_FIRST): /* * qp->s_state is normally set to the opcode of the * last packet constructed for new requests and therefore * is never set to RDMA read response. * RDMA_READ_RESPONSE_FIRST is used by the ACK processing * thread to indicate a SEND needs to be restarted from an * earlier PSN without interfering with the sending thread. * See restart_rc(). 
*/ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); fallthrough; case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); fallthrough; case OP(SEND_MIDDLE): bth2 = mask_psn(qp->s_psn++); ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { len = pmtu; middle = HFI1_CAP_IS_KSET(SDMA_AHG); break; } if (wqe->wr.opcode == IB_WR_SEND) { qp->s_state = OP(SEND_LAST); } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; } else { qp->s_state = OP(SEND_LAST_WITH_INVALIDATE); /* invalidate data comes after the BTH */ ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey); hwords += 1; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; bth2 |= IB_BTH_REQ_ACK; qp->s_cur++; if (qp->s_cur >= qp->s_size) qp->s_cur = 0; break; case OP(RDMA_READ_RESPONSE_LAST): /* * qp->s_state is normally set to the opcode of the * last packet constructed for new requests and therefore * is never set to RDMA read response. * RDMA_READ_RESPONSE_LAST is used by the ACK processing * thread to indicate a RDMA write needs to be restarted from * an earlier PSN without interfering with the sending thread. * See restart_rc(). */ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); fallthrough; case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); fallthrough; case OP(RDMA_WRITE_MIDDLE): bth2 = mask_psn(qp->s_psn++); ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { len = pmtu; middle = HFI1_CAP_IS_KSET(SDMA_AHG); break; } if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { qp->s_state = OP(RDMA_WRITE_LAST); } else { qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; } bth2 |= IB_BTH_REQ_ACK; qp->s_cur++; if (qp->s_cur >= qp->s_size) qp->s_cur = 0; break; case OP(RDMA_READ_RESPONSE_MIDDLE): /* * qp->s_state is normally set to the opcode of the * last packet constructed for new requests and therefore * is never set to RDMA read response. * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing * thread to indicate a RDMA read needs to be restarted from * an earlier PSN without interfering with the sending thread. * See restart_rc(). */ len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu; put_ib_reth_vaddr( wqe->rdma_wr.remote_addr + len, &ohdr->u.rc.reth); ohdr->u.rc.reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey); ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); qp->s_state = OP(RDMA_READ_REQUEST); hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK; qp->s_psn = wqe->lpsn + 1; ss = NULL; len = 0; qp->s_cur++; if (qp->s_cur == qp->s_size) qp->s_cur = 0; break; case TID_OP(WRITE_RESP): /* * This value for s_state is used for restarting a TID RDMA * WRITE request. See comment in OP(RDMA_READ_RESPONSE_MIDDLE * for more). 
*/ req = wqe_to_tid_req(wqe); req->state = TID_REQUEST_RESEND; rcu_read_lock(); remote = rcu_dereference(priv->tid_rdma.remote); req->comp_seg = delta_psn(qp->s_psn, wqe->psn); len = wqe->length - (req->comp_seg * remote->max_len); rcu_read_unlock(); bth2 = mask_psn(qp->s_psn); hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1, &bth2, &len); qp->s_psn = wqe->lpsn + 1; ss = NULL; qp->s_state = TID_OP(WRITE_REQ); priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1; priv->s_tid_cur = qp->s_cur; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); break; case TID_OP(READ_RESP): if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) goto bail; /* This is used to restart a TID read request */ req = wqe_to_tid_req(wqe); wpriv = wqe->priv; /* * Back down. The field qp->s_psn has been set to the psn with * which the request should be restart. It's OK to use division * as this is on the retry path. */ req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps; /* * The following function need to be redefined to return the * status to make sure that we find the flow. At the same * time, we can use the req->state change to check if the * call succeeds or not. */ req->state = TID_REQUEST_RESEND; hfi1_tid_rdma_restart_req(qp, wqe, &bth2); if (req->state != TID_REQUEST_ACTIVE) { /* * Failed to find the flow. Release all allocated tid * resources. */ hfi1_kern_exp_rcv_clear_all(req); hfi1_kern_clear_hw_flow(priv->rcd, qp); hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR); goto bail; } req->state = TID_REQUEST_RESEND; len = min_t(u32, req->seg_len, wqe->length - req->seg_len * req->cur_seg); flow = &req->flows[req->flow_idx]; len -= flow->sent; req->s_next_psn = flow->flow_state.ib_lpsn + 1; delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1, &bth2, &len); if (delta <= 0) { /* Wait for TID space */ goto bail; } hwords += delta; ss = &wpriv->ss; /* Check if this is the last segment */ if (req->cur_seg >= req->total_segs && ++qp->s_cur == qp->s_size) qp->s_cur = 0; qp->s_psn = req->s_next_psn; trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); break; case TID_OP(READ_REQ): req = wqe_to_tid_req(wqe); delta = cmp_psn(qp->s_psn, wqe->psn); /* * If the current WR is not TID RDMA READ, or this is the start * of a new request, we need to change the qp->s_state so that * the request can be set up properly. 
*/ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 || qp->s_cur == qp->s_tail) { qp->s_state = OP(RDMA_READ_REQUEST); if (delta == 0 || qp->s_cur == qp->s_tail) goto check_s_state; else goto bail; } /* Rate limiting */ if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } wpriv = wqe->priv; /* Read one segment at a time */ len = min_t(u32, req->seg_len, wqe->length - req->seg_len * req->cur_seg); delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1, &bth2, &len); if (delta <= 0) { /* Wait for TID space */ goto bail; } hwords += delta; ss = &wpriv->ss; /* Check if this is the last segment */ if (req->cur_seg >= req->total_segs && ++qp->s_cur == qp->s_size) qp->s_cur = 0; qp->s_psn = req->s_next_psn; trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); break; } qp->s_sending_hpsn = bth2; delta = delta_psn(bth2, wqe->psn); if (delta && delta % HFI1_PSN_CREDIT == 0 && wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) bth2 |= IB_BTH_REQ_ACK; if (qp->s_flags & RVT_S_SEND_ONE) { qp->s_flags &= ~RVT_S_SEND_ONE; qp->s_flags |= RVT_S_WAIT_ACK; bth2 |= IB_BTH_REQ_ACK; } qp->s_len -= len; ps->s_txreq->hdr_dwords = hwords; ps->s_txreq->sde = priv->s_sde; ps->s_txreq->ss = ss; ps->s_txreq->s_cur_size = len; hfi1_make_ruc_header( qp, ohdr, bth0 | (qp->s_state << 24), bth1, bth2, middle, ps); return 1; done_free_tx: hfi1_put_txreq(ps->s_txreq); ps->s_txreq = NULL; return 1; bail: hfi1_put_txreq(ps->s_txreq); bail_no_tx: ps->s_txreq = NULL; qp->s_flags &= ~RVT_S_BUSY; /* * If we didn't get a txreq, the QP will be woken up later to try * again. Set the flags to indicate which work item to wake * up. */ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB); return 0; } static inline void hfi1_make_bth_aeth(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1) { if (qp->r_nak_state) ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) | (qp->r_nak_state << IB_AETH_CREDIT_SHIFT)); else ohdr->u.aeth = rvt_compute_aeth(qp); ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn); ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn)); } static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn) { struct rvt_qp *qp = packet->qp; struct hfi1_ibport *ibp; unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto unlock; ibp = rcd_to_iport(packet->rcd); this_cpu_inc(*ibp->rvp.rc_qacks); qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING; qp->s_nak_state = qp->r_nak_state; qp->s_ack_psn = qp->r_ack_psn; if (is_fecn) qp->s_flags |= RVT_S_ECN; /* Schedule the send tasklet. 
*/ hfi1_schedule_send(qp); unlock: spin_unlock_irqrestore(&qp->s_lock, flags); } static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet, struct hfi1_opa_header *opa_hdr, u8 sc5, bool is_fecn, u64 *pbc_flags, u32 *hwords, u32 *nwords) { struct rvt_qp *qp = packet->qp; struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct ib_header *hdr = &opa_hdr->ibh; struct ib_other_headers *ohdr; u16 lrh0 = HFI1_LRH_BTH; u16 pkey; u32 bth0, bth1; opa_hdr->hdr_type = HFI1_PKT_TYPE_9B; ohdr = &hdr->u.oth; /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */ *hwords = 6; if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) { *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh, rdma_ah_read_grh(&qp->remote_ah_attr), *hwords - 2, SIZE_OF_CRC); ohdr = &hdr->u.l.oth; lrh0 = HFI1_LRH_GRH; } /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT); /* read pkey_index w/o lock (its atomic) */ pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT | (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) << IB_SL_SHIFT; hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC, opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B), ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr)); bth0 = pkey | (OP(ACKNOWLEDGE) << 24); if (qp->s_mig_state == IB_MIG_MIGRATED) bth0 |= IB_BTH_MIG_REQ; bth1 = (!!is_fecn) << IB_BECN_SHIFT; /* * Inline ACKs go out without the use of the Verbs send engine, so * we need to set the STL Verbs Extended bit here */ bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT; hfi1_make_bth_aeth(qp, ohdr, bth0, bth1); } static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet, struct hfi1_opa_header *opa_hdr, u8 sc5, bool is_fecn, u64 *pbc_flags, u32 *hwords, u32 *nwords) { struct rvt_qp *qp = packet->qp; struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_16b_header *hdr = &opa_hdr->opah; struct ib_other_headers *ohdr; u32 bth0, bth1 = 0; u16 len, pkey; bool becn = is_fecn; u8 l4 = OPA_16B_L4_IB_LOCAL; u8 extra_bytes; opa_hdr->hdr_type = HFI1_PKT_TYPE_16B; ohdr = &hdr->u.oth; /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */ *hwords = 8; extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0); *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2); if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) { *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh, rdma_ah_read_grh(&qp->remote_ah_attr), *hwords - 4, *nwords); ohdr = &hdr->u.l.oth; l4 = OPA_16B_L4_IB_GLOBAL; } *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC; /* read pkey_index w/o lock (its atomic) */ pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); /* Convert dwords to flits */ len = (*hwords + *nwords) >> 1; hfi1_make_16b_hdr(hdr, ppd->lid | (rdma_ah_get_path_bits(&qp->remote_ah_attr) & ((1 << ppd->lmc) - 1)), opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 16B), len, pkey, becn, 0, l4, sc5); bth0 = pkey | (OP(ACKNOWLEDGE) << 24); bth0 |= extra_bytes << 20; if (qp->s_mig_state == IB_MIG_MIGRATED) bth1 = OPA_BTH_MIG_REQ; hfi1_make_bth_aeth(qp, ohdr, bth0, bth1); } typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet, struct hfi1_opa_header *opa_hdr, u8 sc5, bool is_fecn, u64 *pbc_flags, u32 *hwords, u32 *nwords); /* We support only two types - 9B and 16B for now */ static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = { 
[HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B, [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B }; /* * hfi1_send_rc_ack - Construct an ACK packet and send it * * This is called from hfi1_rc_rcv() and handle_receive_interrupt(). * Note that RDMA reads and atomics are handled in the * send side QP state and send engine. */ void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn) { struct hfi1_ctxtdata *rcd = packet->rcd; struct rvt_qp *qp = packet->qp; struct hfi1_ibport *ibp = rcd_to_iport(rcd); struct hfi1_qp_priv *priv = qp->priv; struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; u64 pbc, pbc_flags = 0; u32 hwords = 0; u32 nwords = 0; u32 plen; struct pio_buf *pbuf; struct hfi1_opa_header opa_hdr; /* clear the defer count */ qp->r_adefered = 0; /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ if (qp->s_flags & RVT_S_RESP_PENDING) { hfi1_queue_rc_ack(packet, is_fecn); return; } /* Ensure s_rdma_ack_cnt changes are committed */ if (qp->s_rdma_ack_cnt) { hfi1_queue_rc_ack(packet, is_fecn); return; } /* Don't try to send ACKs if the link isn't ACTIVE */ if (driver_lstate(ppd) != IB_PORT_ACTIVE) return; /* Make the appropriate header */ hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn, &pbc_flags, &hwords, &nwords); plen = 2 /* PBC */ + hwords + nwords; pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, sc_to_vlt(ppd->dd, sc5), plen); pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL); if (IS_ERR_OR_NULL(pbuf)) { /* * We have no room to send at the moment. Pass * responsibility for sending the ACK to the send engine * so that when enough buffer space becomes available, * the ACK is sent ahead of other outgoing packets. */ hfi1_queue_rc_ack(packet, is_fecn); return; } trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &opa_hdr, ib_is_sc5(sc5)); /* write the pbc and data */ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, (priv->hdr_type == HFI1_PKT_TYPE_9B ? (void *)&opa_hdr.ibh : (void *)&opa_hdr.opah), hwords); return; } /** * update_num_rd_atomic - update the qp->s_num_rd_atomic * @qp: the QP * @psn: the packet sequence number to restart at * @wqe: the wqe * * This is called from reset_psn() to update qp->s_num_rd_atomic * for the current wqe. * Called at interrupt level with the QP s_lock held. */ static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn, struct rvt_swqe *wqe) { u32 opcode = wqe->wr.opcode; if (opcode == IB_WR_RDMA_READ || opcode == IB_WR_ATOMIC_CMP_AND_SWP || opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { qp->s_num_rd_atomic++; } else if (opcode == IB_WR_TID_RDMA_READ) { struct tid_rdma_request *req = wqe_to_tid_req(wqe); struct hfi1_qp_priv *priv = qp->priv; if (cmp_psn(psn, wqe->lpsn) <= 0) { u32 cur_seg; cur_seg = (psn - wqe->psn) / priv->pkts_ps; req->ack_pending = cur_seg - req->comp_seg; priv->pending_tid_r_segs += req->ack_pending; qp->s_num_rd_atomic += req->ack_pending; trace_hfi1_tid_req_update_num_rd_atomic(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); } else { priv->pending_tid_r_segs += req->total_segs; qp->s_num_rd_atomic += req->total_segs; } } } /** * reset_psn - reset the QP state to send starting from PSN * @qp: the QP * @psn: the packet sequence number to restart at * * This is called from hfi1_rc_rcv() to process an incoming RC ACK * for the given QP. * Called at interrupt level with the QP s_lock held. 
*/ static void reset_psn(struct rvt_qp *qp, u32 psn) { u32 n = qp->s_acked; struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n); u32 opcode; struct hfi1_qp_priv *priv = qp->priv; lockdep_assert_held(&qp->s_lock); qp->s_cur = n; priv->pending_tid_r_segs = 0; priv->pending_tid_w_resp = 0; qp->s_num_rd_atomic = 0; /* * If we are starting the request from the beginning, * let the normal send code handle initialization. */ if (cmp_psn(psn, wqe->psn) <= 0) { qp->s_state = OP(SEND_LAST); goto done; } update_num_rd_atomic(qp, psn, wqe); /* Find the work request opcode corresponding to the given PSN. */ for (;;) { int diff; if (++n == qp->s_size) n = 0; if (n == qp->s_tail) break; wqe = rvt_get_swqe_ptr(qp, n); diff = cmp_psn(psn, wqe->psn); if (diff < 0) { /* Point wqe back to the previous one*/ wqe = rvt_get_swqe_ptr(qp, qp->s_cur); break; } qp->s_cur = n; /* * If we are starting the request from the beginning, * let the normal send code handle initialization. */ if (diff == 0) { qp->s_state = OP(SEND_LAST); goto done; } update_num_rd_atomic(qp, psn, wqe); } opcode = wqe->wr.opcode; /* * Set the state to restart in the middle of a request. * Don't change the s_sge, s_cur_sge, or s_cur_size. * See hfi1_make_rc_req(). */ switch (opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: qp->s_state = OP(RDMA_READ_RESPONSE_FIRST); break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: qp->s_state = OP(RDMA_READ_RESPONSE_LAST); break; case IB_WR_TID_RDMA_WRITE: qp->s_state = TID_OP(WRITE_RESP); break; case IB_WR_RDMA_READ: qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE); break; case IB_WR_TID_RDMA_READ: qp->s_state = TID_OP(READ_RESP); break; default: /* * This case shouldn't happen since its only * one PSN per req. */ qp->s_state = OP(SEND_LAST); } done: priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK; qp->s_psn = psn; /* * Set RVT_S_WAIT_PSN as rc_complete() may start the timer * asynchronously before the send engine can get scheduled. * Doing it in hfi1_make_rc_req() is too late. */ if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) && (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) qp->s_flags |= RVT_S_WAIT_PSN; qp->s_flags &= ~HFI1_S_AHG_VALID; trace_hfi1_sender_reset_psn(qp); } /* * Back up requester to resend the last un-ACKed request. * The QP r_lock and s_lock should be held and interrupts disabled. 
*/ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait) { struct hfi1_qp_priv *priv = qp->priv; struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked); struct hfi1_ibport *ibp; lockdep_assert_held(&qp->r_lock); lockdep_assert_held(&qp->s_lock); trace_hfi1_sender_restart_rc(qp); if (qp->s_retry == 0) { if (qp->s_mig_state == IB_MIG_ARMED) { hfi1_migrate_qp(qp); qp->s_retry = qp->s_retry_cnt; } else if (qp->s_last == qp->s_acked) { /* * We need special handling for the OPFN request WQEs as * they are not allowed to generate real user errors */ if (wqe->wr.opcode == IB_WR_OPFN) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); /* * Call opfn_conn_reply() with capcode and * remaining data as 0 to close out the * current request */ opfn_conn_reply(qp, priv->opfn.curr); wqe = do_rc_completion(qp, wqe, ibp); qp->s_flags &= ~RVT_S_WAIT_ACK; } else { trace_hfi1_tid_write_sender_restart_rc(qp, 0); if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { struct tid_rdma_request *req; req = wqe_to_tid_req(wqe); hfi1_kern_exp_rcv_clear_all(req); hfi1_kern_clear_hw_flow(priv->rcd, qp); } hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } return; } else { /* need to handle delayed completion */ return; } } else { qp->s_retry--; } ibp = to_iport(qp->ibqp.device, qp->port_num); if (wqe->wr.opcode == IB_WR_RDMA_READ || wqe->wr.opcode == IB_WR_TID_RDMA_READ) ibp->rvp.n_rc_resends++; else ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn); qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN | RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP); if (wait) qp->s_flags |= RVT_S_SEND_ONE; reset_psn(qp, psn); } /* * Set qp->s_sending_psn to the next PSN after the given one. * This would be psn+1 except when RDMA reads or TID RDMA ops * are present. */ static void reset_sending_psn(struct rvt_qp *qp, u32 psn) { struct rvt_swqe *wqe; u32 n = qp->s_last; lockdep_assert_held(&qp->s_lock); /* Find the work request corresponding to the given PSN. */ for (;;) { wqe = rvt_get_swqe_ptr(qp, n); if (cmp_psn(psn, wqe->lpsn) <= 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ || wqe->wr.opcode == IB_WR_TID_RDMA_READ || wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) qp->s_sending_psn = wqe->lpsn + 1; else qp->s_sending_psn = psn + 1; break; } if (++n == qp->s_size) n = 0; if (n == qp->s_tail) break; } } /** * hfi1_rc_verbs_aborted - handle abort status * @qp: the QP * @opah: the opa header * * This code modifies both ACK bit in BTH[2] * and the s_flags to go into send one mode. * * This serves to throttle the send engine to only * send a single packet in the likely case the * a link has gone down. */ void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah) { struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah); u8 opcode = ib_bth_get_opcode(ohdr); u32 psn; /* ignore responses */ if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) && opcode <= OP(ATOMIC_ACKNOWLEDGE)) || opcode == TID_OP(READ_RESP) || opcode == TID_OP(WRITE_RESP)) return; psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK; ohdr->bth[2] = cpu_to_be32(psn); qp->s_flags |= RVT_S_SEND_ONE; } /* * This should be called with the QP s_lock held and interrupts disabled. 
*/ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah) { struct ib_other_headers *ohdr; struct hfi1_qp_priv *priv = qp->priv; struct rvt_swqe *wqe; u32 opcode, head, tail; u32 psn; struct tid_rdma_request *req; lockdep_assert_held(&qp->s_lock); if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK)) return; ohdr = hfi1_get_rc_ohdr(opah); opcode = ib_bth_get_opcode(ohdr); if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) && opcode <= OP(ATOMIC_ACKNOWLEDGE)) || opcode == TID_OP(READ_RESP) || opcode == TID_OP(WRITE_RESP)) { WARN_ON(!qp->s_rdma_ack_cnt); qp->s_rdma_ack_cnt--; return; } psn = ib_bth_get_psn(ohdr); /* * Don't attempt to reset the sending PSN for packets in the * KDETH PSN space since the PSN does not match anything. */ if (opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) && opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC)) reset_sending_psn(qp, psn); /* Handle TID RDMA WRITE packets differently */ if (opcode >= TID_OP(WRITE_REQ) && opcode <= TID_OP(WRITE_DATA_LAST)) { head = priv->s_tid_head; tail = priv->s_tid_cur; /* * s_tid_cur is set to s_tid_head in the case, where * a new TID RDMA request is being started and all * previous ones have been completed. * Therefore, we need to do a secondary check in order * to properly determine whether we should start the * RC timer. */ wqe = rvt_get_swqe_ptr(qp, tail); req = wqe_to_tid_req(wqe); if (head == tail && req->comp_seg < req->total_segs) { if (tail == 0) tail = qp->s_size - 1; else tail -= 1; } } else { head = qp->s_tail; tail = qp->s_acked; } /* * Start timer after a packet requesting an ACK has been sent and * there are still requests that haven't been acked. */ if ((psn & IB_BTH_REQ_ACK) && tail != head && opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) && opcode != TID_OP(RESYNC) && !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) && (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { if (opcode == TID_OP(READ_REQ)) rvt_add_retry_timer_ext(qp, priv->timeout_shift); else rvt_add_retry_timer(qp); } /* Start TID RDMA ACK timer */ if ((opcode == TID_OP(WRITE_DATA) || opcode == TID_OP(WRITE_DATA_LAST) || opcode == TID_OP(RESYNC)) && (psn & IB_BTH_REQ_ACK) && !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) && (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { /* * The TID RDMA ACK packet could be received before this * function is called. Therefore, add the timer only if TID * RDMA ACK packets are actually pending. */ wqe = rvt_get_swqe_ptr(qp, qp->s_acked); req = wqe_to_tid_req(wqe); if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && req->ack_seg < req->cur_seg) hfi1_add_tid_retry_timer(qp); } while (qp->s_last != qp->s_acked) { wqe = rvt_get_swqe_ptr(qp, qp->s_last); if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 && cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; trdma_clean_swqe(qp, wqe); trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); rvt_qp_complete_swqe(qp, wqe, ib_hfi1_wc_opcode[wqe->wr.opcode], IB_WC_SUCCESS); } /* * If we were waiting for sends to complete before re-sending, * and they are now complete, restart sending. */ trace_hfi1_sendcomplete(qp, psn); if (qp->s_flags & RVT_S_WAIT_PSN && cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { qp->s_flags &= ~RVT_S_WAIT_PSN; qp->s_sending_psn = qp->s_psn; qp->s_sending_hpsn = qp->s_psn - 1; hfi1_schedule_send(qp); } } static inline void update_last_psn(struct rvt_qp *qp, u32 psn) { qp->s_last_psn = psn; } /* * Generate a SWQE completion. 
* This is similar to hfi1_send_complete but has to check to be sure * that the SGEs are not being referenced if the SWQE is being resent. */ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe, struct hfi1_ibport *ibp) { struct hfi1_qp_priv *priv = qp->priv; lockdep_assert_held(&qp->s_lock); /* * Don't decrement refcount and don't generate a * completion if the SWQE is being resent until the send * is finished. */ trace_hfi1_rc_completion(qp, wqe->lpsn); if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { trdma_clean_swqe(qp, wqe); trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); rvt_qp_complete_swqe(qp, wqe, ib_hfi1_wc_opcode[wqe->wr.opcode], IB_WC_SUCCESS); } else { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); this_cpu_inc(*ibp->rvp.rc_delayed_comp); /* * If send progress not running attempt to progress * SDMA queue. */ if (ppd->dd->flags & HFI1_HAS_SEND_DMA) { struct sdma_engine *engine; u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr); u8 sc5; /* For now use sc to find engine */ sc5 = ibp->sl_to_sc[sl]; engine = qp_to_sdma_engine(qp, sc5); sdma_engine_progress_schedule(engine); } } qp->s_retry = qp->s_retry_cnt; /* * Don't update the last PSN if the request being completed is * a TID RDMA WRITE request. * Completion of the TID RDMA WRITE requests are done by the * TID RDMA ACKs and as such could be for a request that has * already been ACKed as far as the IB state machine is * concerned. */ if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) update_last_psn(qp, wqe->lpsn); /* * If we are completing a request which is in the process of * being resent, we can stop re-sending it since we know the * responder has already seen it. */ if (qp->s_acked == qp->s_cur) { if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; qp->s_acked = qp->s_cur; wqe = rvt_get_swqe_ptr(qp, qp->s_cur); if (qp->s_acked != qp->s_tail) { qp->s_state = OP(SEND_LAST); qp->s_psn = wqe->psn; } } else { if (++qp->s_acked >= qp->s_size) qp->s_acked = 0; if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) qp->s_draining = 0; wqe = rvt_get_swqe_ptr(qp, qp->s_acked); } if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) { priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK; hfi1_schedule_send(qp); } return wqe; } static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd) { /* Retry this request. */ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { qp->r_flags |= RVT_R_RDMAR_SEQ; hfi1_restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { qp->r_flags |= RVT_R_RSP_SEND; rvt_get_qp(qp); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } } } /** * update_qp_retry_state - Update qp retry state. * @qp: the QP * @psn: the packet sequence number of the TID RDMA WRITE RESP. * @spsn: The start psn for the given TID RDMA WRITE swqe. * @lpsn: The last psn for the given TID RDMA WRITE swqe. * * This function is called to update the qp retry state upon * receiving a TID WRITE RESP after the qp is scheduled to retry * a request. */ static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn, u32 lpsn) { struct hfi1_qp_priv *qpriv = qp->priv; qp->s_psn = psn + 1; /* * If this is the first TID RDMA WRITE RESP packet for the current * request, change the s_state so that the retry will be processed * correctly. Similarly, if this is the last TID RDMA WRITE RESP * packet, change the s_state and advance the s_cur. 
*/ if (cmp_psn(psn, lpsn) >= 0) { qp->s_cur = qpriv->s_tid_cur + 1; if (qp->s_cur >= qp->s_size) qp->s_cur = 0; qp->s_state = TID_OP(WRITE_REQ); } else if (!cmp_psn(psn, spsn)) { qp->s_cur = qpriv->s_tid_cur; qp->s_state = TID_OP(WRITE_RESP); } } /* * do_rc_ack - process an incoming RC ACK * @qp: the QP the ACK came in on * @psn: the packet sequence number of the ACK * @opcode: the opcode of the request that resulted in the ACK * * This is called from rc_rcv_resp() to process an incoming RC ACK * for the given QP. * May be called at interrupt level, with the QP s_lock held. * Returns 1 if OK, 0 if current operation should be aborted (NAK). */ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val, struct hfi1_ctxtdata *rcd) { struct hfi1_ibport *ibp; enum ib_wc_status status; struct hfi1_qp_priv *qpriv = qp->priv; struct rvt_swqe *wqe; int ret = 0; u32 ack_psn; int diff; struct rvt_dev_info *rdi; lockdep_assert_held(&qp->s_lock); /* * Note that NAKs implicitly ACK outstanding SEND and RDMA write * requests and implicitly NAK RDMA read and atomic requests issued * before the NAK'ed request. The MSN won't include the NAK'ed * request but will include an ACK'ed request(s). */ ack_psn = psn; if (aeth >> IB_AETH_NAK_SHIFT) ack_psn--; wqe = rvt_get_swqe_ptr(qp, qp->s_acked); ibp = rcd_to_iport(rcd); /* * The MSN might be for a later WQE than the PSN indicates so * only complete WQEs that the PSN finishes. */ while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) { /* * RDMA_READ_RESPONSE_ONLY is a special case since * we want to generate completion events for everything * before the RDMA read, copy the data, then generate * the completion for the read. */ if (wqe->wr.opcode == IB_WR_RDMA_READ && opcode == OP(RDMA_READ_RESPONSE_ONLY) && diff == 0) { ret = 1; goto bail_stop; } /* * If this request is a RDMA read or atomic, and the ACK is * for a later operation, this ACK NAKs the RDMA read or * atomic. In other words, only a RDMA_READ_LAST or ONLY * can ACK a RDMA read and likewise for atomic ops. Note * that the NAK case can only happen if relaxed ordering is * used and requests are sent after an RDMA read or atomic * is sent but before the response is received. */ if ((wqe->wr.opcode == IB_WR_RDMA_READ && (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) || (wqe->wr.opcode == IB_WR_TID_RDMA_READ && (opcode != TID_OP(READ_RESP) || diff != 0)) || ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) || (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && (delta_psn(psn, qp->s_last_psn) != 1))) { set_restart_qp(qp, rcd); /* * No need to process the ACK/NAK since we are * restarting an earlier request. 
*/ goto bail_stop; } if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { u64 *vaddr = wqe->sg_list[0].vaddr; *vaddr = val; } if (wqe->wr.opcode == IB_WR_OPFN) opfn_conn_reply(qp, val); if (qp->s_num_rd_atomic && (wqe->wr.opcode == IB_WR_RDMA_READ || wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { qp->s_num_rd_atomic--; /* Restart sending task if fence is complete */ if ((qp->s_flags & RVT_S_WAIT_FENCE) && !qp->s_num_rd_atomic) { qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_ACK); hfi1_schedule_send(qp); } else if (qp->s_flags & RVT_S_WAIT_RDMAR) { qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK); hfi1_schedule_send(qp); } } /* * TID RDMA WRITE requests will be completed by the TID RDMA * ACK packet handler (see tid_rdma.c). */ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) break; wqe = do_rc_completion(qp, wqe, ibp); if (qp->s_acked == qp->s_tail) break; } trace_hfi1_rc_ack_do(qp, aeth, psn, wqe); trace_hfi1_sender_do_rc_ack(qp); switch (aeth >> IB_AETH_NAK_SHIFT) { case 0: /* ACK */ this_cpu_inc(*ibp->rvp.rc_acks); if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { if (wqe_to_tid_req(wqe)->ack_pending) rvt_mod_retry_timer_ext(qp, qpriv->timeout_shift); else rvt_stop_rc_timers(qp); } else if (qp->s_acked != qp->s_tail) { struct rvt_swqe *__w = NULL; if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID) __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur); /* * Stop timers if we've received all of the TID RDMA * WRITE * responses. */ if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE && opcode == TID_OP(WRITE_RESP)) { /* * Normally, the loop above would correctly * process all WQEs from s_acked onward and * either complete them or check for correct * PSN sequencing. * However, for TID RDMA, due to pipelining, * the response may not be for the request at * s_acked so the above look would just be * skipped. This does not allow for checking * the PSN sequencing. It has to be done * separately. */ if (cmp_psn(psn, qp->s_last_psn + 1)) { set_restart_qp(qp, rcd); goto bail_stop; } /* * If the psn is being resent, stop the * resending. */ if (qp->s_cur != qp->s_tail && cmp_psn(qp->s_psn, psn) <= 0) update_qp_retry_state(qp, psn, __w->psn, __w->lpsn); else if (--qpriv->pending_tid_w_resp) rvt_mod_retry_timer(qp); else rvt_stop_rc_timers(qp); } else { /* * We are expecting more ACKs so * mod the retry timer. */ rvt_mod_retry_timer(qp); /* * We can stop re-sending the earlier packets * and continue with the next packet the * receiver wants. */ if (cmp_psn(qp->s_psn, psn) <= 0) reset_psn(qp, psn + 1); } } else { /* No more acks - kill all timers */ rvt_stop_rc_timers(qp); if (cmp_psn(qp->s_psn, psn) <= 0) { qp->s_state = OP(SEND_LAST); qp->s_psn = psn + 1; } } if (qp->s_flags & RVT_S_WAIT_ACK) { qp->s_flags &= ~RVT_S_WAIT_ACK; hfi1_schedule_send(qp); } rvt_get_credit(qp, aeth); qp->s_rnr_retry = qp->s_rnr_retry_cnt; qp->s_retry = qp->s_retry_cnt; /* * If the current request is a TID RDMA WRITE request and the * response is not a TID RDMA WRITE RESP packet, s_last_psn * can't be advanced. 
*/ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && opcode != TID_OP(WRITE_RESP) && cmp_psn(psn, wqe->psn) >= 0) return 1; update_last_psn(qp, psn); return 1; case 1: /* RNR NAK */ ibp->rvp.n_rnr_naks++; if (qp->s_acked == qp->s_tail) goto bail_stop; if (qp->s_flags & RVT_S_WAIT_RNR) goto bail_stop; rdi = ib_to_rvt(qp->ibqp.device); if (!(rdi->post_parms[wqe->wr.opcode].flags & RVT_OPERATION_IGN_RNR_CNT)) { if (qp->s_rnr_retry == 0) { status = IB_WC_RNR_RETRY_EXC_ERR; goto class_b; } if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0) qp->s_rnr_retry--; } /* * The last valid PSN is the previous PSN. For TID RDMA WRITE * request, s_last_psn should be incremented only when a TID * RDMA WRITE RESP is received to avoid skipping lost TID RDMA * WRITE RESP packets. */ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) { reset_psn(qp, qp->s_last_psn + 1); } else { update_last_psn(qp, psn - 1); reset_psn(qp, psn); } ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn); qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK); rvt_stop_rc_timers(qp); rvt_add_rnr_timer(qp, aeth); return 0; case 3: /* NAK */ if (qp->s_acked == qp->s_tail) goto bail_stop; /* The last valid PSN is the previous PSN. */ update_last_psn(qp, psn - 1); switch ((aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK) { case 0: /* PSN sequence error */ ibp->rvp.n_seq_naks++; /* * Back up to the responder's expected PSN. * Note that we might get a NAK in the middle of an * RDMA READ response which terminates the RDMA * READ. */ hfi1_restart_rc(qp, psn, 0); hfi1_schedule_send(qp); break; case 1: /* Invalid Request */ status = IB_WC_REM_INV_REQ_ERR; ibp->rvp.n_other_naks++; goto class_b; case 2: /* Remote Access Error */ status = IB_WC_REM_ACCESS_ERR; ibp->rvp.n_other_naks++; goto class_b; case 3: /* Remote Operation Error */ status = IB_WC_REM_OP_ERR; ibp->rvp.n_other_naks++; class_b: if (qp->s_last == qp->s_acked) { if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) hfi1_kern_read_tid_flow_free(qp); hfi1_trdma_send_complete(qp, wqe, status); rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } break; default: /* Ignore other reserved NAK error codes */ goto reserved; } qp->s_retry = qp->s_retry_cnt; qp->s_rnr_retry = qp->s_rnr_retry_cnt; goto bail_stop; default: /* 2: reserved */ reserved: /* Ignore reserved NAK codes. */ goto bail_stop; } /* cannot be reached */ bail_stop: rvt_stop_rc_timers(qp); return ret; } /* * We have seen an out of sequence RDMA read middle or last packet. * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE. */ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, struct hfi1_ctxtdata *rcd) { struct rvt_swqe *wqe; lockdep_assert_held(&qp->s_lock); /* Remove QP from retry timer */ rvt_stop_rc_timers(qp); wqe = rvt_get_swqe_ptr(qp, qp->s_acked); while (cmp_psn(psn, wqe->lpsn) > 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ || wqe->wr.opcode == IB_WR_TID_RDMA_READ || wqe->wr.opcode == IB_WR_TID_RDMA_WRITE || wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) break; wqe = do_rc_completion(qp, wqe, ibp); } ibp->rvp.n_rdma_seq++; qp->r_flags |= RVT_R_RDMAR_SEQ; hfi1_restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { qp->r_flags |= RVT_R_RSP_SEND; rvt_get_qp(qp); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } } /** * rc_rcv_resp - process an incoming RC response packet * @packet: data packet information * * This is called from hfi1_rc_rcv() to process an incoming RC response * packet for the given QP. * Called at interrupt level. 
*/ static void rc_rcv_resp(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; void *data = packet->payload; u32 tlen = packet->tlen; struct rvt_qp *qp = packet->qp; struct hfi1_ibport *ibp; struct ib_other_headers *ohdr = packet->ohdr; struct rvt_swqe *wqe; enum ib_wc_status status; unsigned long flags; int diff; u64 val; u32 aeth; u32 psn = ib_bth_get_psn(packet->ohdr); u32 pmtu = qp->pmtu; u16 hdrsize = packet->hlen; u8 opcode = packet->opcode; u8 pad = packet->pad; u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2); spin_lock_irqsave(&qp->s_lock, flags); trace_hfi1_ack(qp, psn); /* Ignore invalid responses. */ if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0) goto ack_done; /* Ignore duplicate responses. */ diff = cmp_psn(psn, qp->s_last_psn); if (unlikely(diff <= 0)) { /* Update credits for "ghost" ACKs */ if (diff == 0 && opcode == OP(ACKNOWLEDGE)) { aeth = be32_to_cpu(ohdr->u.aeth); if ((aeth >> IB_AETH_NAK_SHIFT) == 0) rvt_get_credit(qp, aeth); } goto ack_done; } /* * Skip everything other than the PSN we expect, if we are waiting * for a reply to a restarted RDMA read or atomic op. */ if (qp->r_flags & RVT_R_RDMAR_SEQ) { if (cmp_psn(psn, qp->s_last_psn + 1) != 0) goto ack_done; qp->r_flags &= ~RVT_R_RDMAR_SEQ; } if (unlikely(qp->s_acked == qp->s_tail)) goto ack_done; wqe = rvt_get_swqe_ptr(qp, qp->s_acked); status = IB_WC_SUCCESS; switch (opcode) { case OP(ACKNOWLEDGE): case OP(ATOMIC_ACKNOWLEDGE): case OP(RDMA_READ_RESPONSE_FIRST): aeth = be32_to_cpu(ohdr->u.aeth); if (opcode == OP(ATOMIC_ACKNOWLEDGE)) val = ib_u64_get(&ohdr->u.at.atomic_ack_eth); else val = 0; if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || opcode != OP(RDMA_READ_RESPONSE_FIRST)) goto ack_done; wqe = rvt_get_swqe_ptr(qp, qp->s_acked); if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) goto ack_op_err; /* * If this is a response to a resent RDMA read, we * have to be careful to copy the data to the right * location. */ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, wqe, psn, pmtu); goto read_middle; case OP(RDMA_READ_RESPONSE_MIDDLE): /* no AETH, no ACK */ if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) goto ack_seq_err; if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) goto ack_op_err; read_middle: if (unlikely(tlen != (hdrsize + pmtu + extra_bytes))) goto ack_len_err; if (unlikely(pmtu >= qp->s_rdma_read_len)) goto ack_len_err; /* * We got a response so update the timeout. * 4.096 usec. * (1 << qp->timeout) */ rvt_mod_retry_timer(qp); if (qp->s_flags & RVT_S_WAIT_ACK) { qp->s_flags &= ~RVT_S_WAIT_ACK; hfi1_schedule_send(qp); } if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE)) qp->s_retry = qp->s_retry_cnt; /* * Update the RDMA receive state but do the copy w/o * holding the locks and blocking interrupts. */ qp->s_rdma_read_len -= pmtu; update_last_psn(qp, psn); spin_unlock_irqrestore(&qp->s_lock, flags); rvt_copy_sge(qp, &qp->s_rdma_read_sge, data, pmtu, false, false); goto bail; case OP(RDMA_READ_RESPONSE_ONLY): aeth = be32_to_cpu(ohdr->u.aeth); if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) goto ack_done; /* * Check that the data size is >= 0 && <= pmtu. * Remember to account for ICRC (4). */ if (unlikely(tlen < (hdrsize + extra_bytes))) goto ack_len_err; /* * If this is a response to a resent RDMA read, we * have to be careful to copy the data to the right * location. */ wqe = rvt_get_swqe_ptr(qp, qp->s_acked); qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, wqe, psn, pmtu); goto read_last; case OP(RDMA_READ_RESPONSE_LAST): /* ACKs READ req. 
*/ if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) goto ack_seq_err; if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) goto ack_op_err; /* * Check that the data size is >= 1 && <= pmtu. * Remember to account for ICRC (4). */ if (unlikely(tlen <= (hdrsize + extra_bytes))) goto ack_len_err; read_last: tlen -= hdrsize + extra_bytes; if (unlikely(tlen != qp->s_rdma_read_len)) goto ack_len_err; aeth = be32_to_cpu(ohdr->u.aeth); rvt_copy_sge(qp, &qp->s_rdma_read_sge, data, tlen, false, false); WARN_ON(qp->s_rdma_read_sge.num_sge); (void)do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST), 0, rcd); goto ack_done; } ack_op_err: status = IB_WC_LOC_QP_OP_ERR; goto ack_err; ack_seq_err: ibp = rcd_to_iport(rcd); rdma_seq_err(qp, ibp, psn, rcd); goto ack_done; ack_len_err: status = IB_WC_LOC_LEN_ERR; ack_err: if (qp->s_last == qp->s_acked) { rvt_send_complete(qp, wqe, status); rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } ack_done: spin_unlock_irqrestore(&qp->s_lock, flags); bail: return; } static inline void rc_cancel_ack(struct rvt_qp *qp) { qp->r_adefered = 0; if (list_empty(&qp->rspwait)) return; list_del_init(&qp->rspwait); qp->r_flags &= ~RVT_R_RSP_NAK; rvt_put_qp(qp); } /** * rc_rcv_error - process an incoming duplicate or error RC packet * @ohdr: the other headers for this packet * @data: the packet data * @qp: the QP for this packet * @opcode: the opcode for this packet * @psn: the packet sequence number for this packet * @diff: the difference between the PSN and the expected PSN * @rcd: the receive context * * This is called from hfi1_rc_rcv() to process an unexpected * incoming RC packet for the given QP. * Called at interrupt level. * Return 1 if no more processing is needed; otherwise return 0 to * schedule a response to be sent. */ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data, struct rvt_qp *qp, u32 opcode, u32 psn, int diff, struct hfi1_ctxtdata *rcd) { struct hfi1_ibport *ibp = rcd_to_iport(rcd); struct rvt_ack_entry *e; unsigned long flags; u8 prev; u8 mra; /* most recent ACK */ bool old_req; trace_hfi1_rcv_error(qp, psn); if (diff > 0) { /* * Packet sequence error. * A NAK will ACK earlier sends and RDMA writes. * Don't queue the NAK if we already sent one. */ if (!qp->r_nak_state) { ibp->rvp.n_rc_seqnak++; qp->r_nak_state = IB_NAK_PSN_ERROR; /* Use the expected PSN. */ qp->r_ack_psn = qp->r_psn; /* * Wait to send the sequence NAK until all packets * in the receive queue have been processed. * Otherwise, we end up propagating congestion. */ rc_defered_ack(rcd, qp); } goto done; } /* * Handle a duplicate request. Don't re-execute SEND, RDMA * write or atomic op. Don't NAK errors, just silently drop * the duplicate request. Note that r_sge, r_len, and * r_rcv_len may be in use so don't modify them. * * We are supposed to ACK the earliest duplicate PSN but we * can coalesce an outstanding duplicate ACK. We have to * send the earliest so that RDMA reads can be restarted at * the requester's expected PSN. * * First, find where this duplicate PSN falls within the * ACKs previously sent. * old_req is true if there is an older response that is scheduled * to be sent before sending this one. */ e = NULL; old_req = true; ibp->rvp.n_rc_dupreq++; spin_lock_irqsave(&qp->s_lock, flags); e = find_prev_entry(qp, psn, &prev, &mra, &old_req); switch (opcode) { case OP(RDMA_READ_REQUEST): { struct ib_reth *reth; u32 offset; u32 len; /* * If we didn't find the RDMA read request in the ack queue, * we can ignore this request. 
*/ if (!e || e->opcode != OP(RDMA_READ_REQUEST)) goto unlock_done; /* RETH comes after BTH */ reth = &ohdr->u.rc.reth; /* * Address range must be a subset of the original * request and start on pmtu boundaries. * We reuse the old ack_queue slot since the requester * should not back up and request an earlier PSN for the * same request. */ offset = delta_psn(psn, e->psn) * qp->pmtu; len = be32_to_cpu(reth->length); if (unlikely(offset + len != e->rdma_sge.sge_length)) goto unlock_done; release_rdma_sge_mr(e); if (len != 0) { u32 rkey = be32_to_cpu(reth->rkey); u64 vaddr = get_ib_reth_vaddr(reth); int ok; ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto unlock_done; } else { e->rdma_sge.vaddr = NULL; e->rdma_sge.length = 0; e->rdma_sge.sge_length = 0; } e->psn = psn; if (old_req) goto unlock_done; if (qp->s_acked_ack_queue == qp->s_tail_ack_queue) qp->s_acked_ack_queue = prev; qp->s_tail_ack_queue = prev; break; } case OP(COMPARE_SWAP): case OP(FETCH_ADD): { /* * If we didn't find the atomic request in the ack queue * or the send engine is already backed up to send an * earlier entry, we can ignore this request. */ if (!e || e->opcode != (u8)opcode || old_req) goto unlock_done; if (qp->s_tail_ack_queue == qp->s_acked_ack_queue) qp->s_acked_ack_queue = prev; qp->s_tail_ack_queue = prev; break; } default: /* * Ignore this operation if it doesn't request an ACK * or an earlier RDMA read or atomic is going to be resent. */ if (!(psn & IB_BTH_REQ_ACK) || old_req) goto unlock_done; /* * Resend the most recent ACK if this request is * after all the previous RDMA reads and atomics. */ if (mra == qp->r_head_ack_queue) { spin_unlock_irqrestore(&qp->s_lock, flags); qp->r_nak_state = 0; qp->r_ack_psn = qp->r_psn - 1; goto send_ack; } /* * Resend the RDMA read or atomic op which * ACKs this duplicate request. 
*/ if (qp->s_tail_ack_queue == qp->s_acked_ack_queue) qp->s_acked_ack_queue = mra; qp->s_tail_ack_queue = mra; break; } qp->s_ack_state = OP(ACKNOWLEDGE); qp->s_flags |= RVT_S_RESP_PENDING; qp->r_nak_state = 0; hfi1_schedule_send(qp); unlock_done: spin_unlock_irqrestore(&qp->s_lock, flags); done: return 1; send_ack: return 0; } static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn, u32 rqpn, u8 svc_type) { struct opa_hfi1_cong_log_event_internal *cc_event; unsigned long flags; if (sl >= OPA_MAX_SLS) return; spin_lock_irqsave(&ppd->cc_log_lock, flags); ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8); ppd->threshold_event_counter++; cc_event = &ppd->cc_events[ppd->cc_log_idx++]; if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS) ppd->cc_log_idx = 0; cc_event->lqpn = lqpn & RVT_QPN_MASK; cc_event->rqpn = rqpn & RVT_QPN_MASK; cc_event->sl = sl; cc_event->svc_type = svc_type; cc_event->rlid = rlid; /* keep timestamp in units of 1.024 usec */ cc_event->timestamp = ktime_get_ns() / 1024; spin_unlock_irqrestore(&ppd->cc_log_lock, flags); } void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn, u32 rqpn, u8 svc_type) { struct cca_timer *cca_timer; u16 ccti, ccti_incr, ccti_timer, ccti_limit; u8 trigger_threshold; struct cc_state *cc_state; unsigned long flags; if (sl >= OPA_MAX_SLS) return; cc_state = get_cc_state(ppd); if (!cc_state) return; /* * 1) increase CCTI (for this SL) * 2) select IPG (i.e., call set_link_ipg()) * 3) start timer */ ccti_limit = cc_state->cct.ccti_limit; ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase; ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer; trigger_threshold = cc_state->cong_setting.entries[sl].trigger_threshold; spin_lock_irqsave(&ppd->cca_timer_lock, flags); cca_timer = &ppd->cca_timer[sl]; if (cca_timer->ccti < ccti_limit) { if (cca_timer->ccti + ccti_incr <= ccti_limit) cca_timer->ccti += ccti_incr; else cca_timer->ccti = ccti_limit; set_link_ipg(ppd); } ccti = cca_timer->ccti; if (!hrtimer_active(&cca_timer->hrtimer)) { /* ccti_timer is in units of 1.024 usec */ unsigned long nsec = 1024 * ccti_timer; hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec), HRTIMER_MODE_REL_PINNED); } spin_unlock_irqrestore(&ppd->cca_timer_lock, flags); if ((trigger_threshold != 0) && (ccti >= trigger_threshold)) log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type); } /** * hfi1_rc_rcv - process an incoming RC packet * @packet: data packet information * * This is called from qp_rcv() to process an incoming RC packet * for the given QP. * May be called at interrupt level. */ void hfi1_rc_rcv(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; void *data = packet->payload; u32 tlen = packet->tlen; struct rvt_qp *qp = packet->qp; struct hfi1_qp_priv *qpriv = qp->priv; struct hfi1_ibport *ibp = rcd_to_iport(rcd); struct ib_other_headers *ohdr = packet->ohdr; u32 opcode = packet->opcode; u32 hdrsize = packet->hlen; u32 psn = ib_bth_get_psn(packet->ohdr); u32 pad = packet->pad; struct ib_wc wc; u32 pmtu = qp->pmtu; int diff; struct ib_reth *reth; unsigned long flags; int ret; bool copy_last = false, fecn; u32 rkey; u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2); lockdep_assert_held(&qp->r_lock); if (hfi1_ruc_check_hdr(ibp, packet)) return; fecn = process_ecn(qp, packet); opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1])); /* * Process responses (ACKs) before anything else. 
Note that the * packet sequence number will be for something in the send work * queue rather than the expected receive packet sequence number. * In other words, this QP is the requester. */ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) && opcode <= OP(ATOMIC_ACKNOWLEDGE)) { rc_rcv_resp(packet); return; } /* Compute 24 bits worth of difference. */ diff = delta_psn(psn, qp->r_psn); if (unlikely(diff)) { if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) return; goto send_ack; } /* Check for opcode sequence errors. */ switch (qp->r_state) { case OP(SEND_FIRST): case OP(SEND_MIDDLE): if (opcode == OP(SEND_MIDDLE) || opcode == OP(SEND_LAST) || opcode == OP(SEND_LAST_WITH_IMMEDIATE) || opcode == OP(SEND_LAST_WITH_INVALIDATE)) break; goto nack_inv; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_MIDDLE): if (opcode == OP(RDMA_WRITE_MIDDLE) || opcode == OP(RDMA_WRITE_LAST) || opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) break; goto nack_inv; default: if (opcode == OP(SEND_MIDDLE) || opcode == OP(SEND_LAST) || opcode == OP(SEND_LAST_WITH_IMMEDIATE) || opcode == OP(SEND_LAST_WITH_INVALIDATE) || opcode == OP(RDMA_WRITE_MIDDLE) || opcode == OP(RDMA_WRITE_LAST) || opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) goto nack_inv; /* * Note that it is up to the requester to not send a new * RDMA read or atomic operation before receiving an ACK * for the previous operation. */ break; } if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) rvt_comm_est(qp); /* OK, process the packet. */ switch (opcode) { case OP(SEND_FIRST): ret = rvt_get_rwqe(qp, false); if (ret < 0) goto nack_op_err; if (!ret) goto rnr_nak; qp->r_rcv_len = 0; fallthrough; case OP(SEND_MIDDLE): case OP(RDMA_WRITE_MIDDLE): send_middle: /* Check for invalid length PMTU or posted rwqe len. */ /* * There will be no padding for 9B packet but 16B packets * will come in with some padding since we always add * CRC and LT bytes which will need to be flit aligned */ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes))) goto nack_inv; qp->r_rcv_len += pmtu; if (unlikely(qp->r_rcv_len > qp->r_len)) goto nack_inv; rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false); break; case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): /* consume RWQE */ ret = rvt_get_rwqe(qp, true); if (ret < 0) goto nack_op_err; if (!ret) goto rnr_nak; goto send_last_imm; case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): case OP(SEND_ONLY_WITH_INVALIDATE): ret = rvt_get_rwqe(qp, false); if (ret < 0) goto nack_op_err; if (!ret) goto rnr_nak; qp->r_rcv_len = 0; if (opcode == OP(SEND_ONLY)) goto no_immediate_data; if (opcode == OP(SEND_ONLY_WITH_INVALIDATE)) goto send_last_inv; fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */ case OP(SEND_LAST_WITH_IMMEDIATE): send_last_imm: wc.ex.imm_data = ohdr->u.imm_data; wc.wc_flags = IB_WC_WITH_IMM; goto send_last; case OP(SEND_LAST_WITH_INVALIDATE): send_last_inv: rkey = be32_to_cpu(ohdr->u.ieth); if (rvt_invalidate_rkey(qp, rkey)) goto no_immediate_data; wc.ex.invalidate_rkey = rkey; wc.wc_flags = IB_WC_WITH_INVALIDATE; goto send_last; case OP(RDMA_WRITE_LAST): copy_last = rvt_is_user_qp(qp); fallthrough; case OP(SEND_LAST): no_immediate_data: wc.wc_flags = 0; wc.ex.imm_data = 0; send_last: /* Check for invalid length. */ /* LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + extra_bytes))) goto nack_inv; /* Don't count the CRC(and padding and LT byte for 16B). 
*/ tlen -= (hdrsize + extra_bytes); wc.byte_len = tlen + qp->r_rcv_len; if (unlikely(wc.byte_len > qp->r_len)) goto nack_inv; rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last); rvt_put_ss(&qp->r_sge); qp->r_msn++; if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) break; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) || opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; else wc.opcode = IB_WC_RECV; wc.qp = &qp->ibqp; wc.src_qp = qp->remote_qpn; wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX; /* * It seems that IB mandates the presence of an SL in a * work completion only for the UD transport (see section * 11.4.2 of IBTA Vol. 1). * * However, the way the SL is chosen below is consistent * with the way that IB/qib works and is trying avoid * introducing incompatibilities. * * See also OPA Vol. 1, section 9.7.6, and table 9-17. */ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr); /* zero fields that are N/A */ wc.vendor_err = 0; wc.pkey_index = 0; wc.dlid_path_bits = 0; wc.port_num = 0; /* Signal completion event if the solicited bit is set. */ rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr)); break; case OP(RDMA_WRITE_ONLY): copy_last = rvt_is_user_qp(qp); fallthrough; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto nack_inv; /* consume RWQE */ reth = &ohdr->u.rc.reth; qp->r_len = be32_to_cpu(reth->length); qp->r_rcv_len = 0; qp->r_sge.sg_list = NULL; if (qp->r_len != 0) { u32 rkey = be32_to_cpu(reth->rkey); u64 vaddr = get_ib_reth_vaddr(reth); int ok; /* Check rkey & NAK */ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, rkey, IB_ACCESS_REMOTE_WRITE); if (unlikely(!ok)) goto nack_acc; qp->r_sge.num_sge = 1; } else { qp->r_sge.num_sge = 0; qp->r_sge.sge.mr = NULL; qp->r_sge.sge.vaddr = NULL; qp->r_sge.sge.length = 0; qp->r_sge.sge.sge_length = 0; } if (opcode == OP(RDMA_WRITE_FIRST)) goto send_middle; else if (opcode == OP(RDMA_WRITE_ONLY)) goto no_immediate_data; ret = rvt_get_rwqe(qp, true); if (ret < 0) goto nack_op_err; if (!ret) { /* peer will send again */ rvt_put_ss(&qp->r_sge); goto rnr_nak; } wc.ex.imm_data = ohdr->u.rc.imm_data; wc.wc_flags = IB_WC_WITH_IMM; goto send_last; case OP(RDMA_READ_REQUEST): { struct rvt_ack_entry *e; u32 len; u8 next; if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) goto nack_inv; next = qp->r_head_ack_queue + 1; /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) next = 0; spin_lock_irqsave(&qp->s_lock, flags); if (unlikely(next == qp->s_acked_ack_queue)) { if (!qp->s_ack_queue[next].sent) goto nack_inv_unlck; update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; release_rdma_sge_mr(e); reth = &ohdr->u.rc.reth; len = be32_to_cpu(reth->length); if (len) { u32 rkey = be32_to_cpu(reth->rkey); u64 vaddr = get_ib_reth_vaddr(reth); int ok; /* Check rkey & NAK */ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto nack_acc_unlck; /* * Update the next expected PSN. We add 1 later * below, so only add the remainder here. 
*/ qp->r_psn += rvt_div_mtu(qp, len - 1); } else { e->rdma_sge.mr = NULL; e->rdma_sge.vaddr = NULL; e->rdma_sge.length = 0; e->rdma_sge.sge_length = 0; } e->opcode = opcode; e->sent = 0; e->psn = psn; e->lpsn = qp->r_psn; /* * We need to increment the MSN here instead of when we * finish sending the result since a duplicate request would * increment it more than once. */ qp->r_msn++; qp->r_psn++; qp->r_state = opcode; qp->r_nak_state = 0; qp->r_head_ack_queue = next; qpriv->r_tid_alloc = qp->r_head_ack_queue; /* Schedule the send engine. */ qp->s_flags |= RVT_S_RESP_PENDING; if (fecn) qp->s_flags |= RVT_S_ECN; hfi1_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); return; } case OP(COMPARE_SWAP): case OP(FETCH_ADD): { struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth; u64 vaddr = get_ib_ateth_vaddr(ateth); bool opfn = opcode == OP(COMPARE_SWAP) && vaddr == HFI1_VERBS_E_ATOMIC_VADDR; struct rvt_ack_entry *e; atomic64_t *maddr; u64 sdata; u32 rkey; u8 next; if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) && !opfn)) goto nack_inv; next = qp->r_head_ack_queue + 1; if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) next = 0; spin_lock_irqsave(&qp->s_lock, flags); if (unlikely(next == qp->s_acked_ack_queue)) { if (!qp->s_ack_queue[next].sent) goto nack_inv_unlck; update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; release_rdma_sge_mr(e); /* Process OPFN special virtual address */ if (opfn) { opfn_conn_response(qp, e, ateth); goto ack; } if (unlikely(vaddr & (sizeof(u64) - 1))) goto nack_inv_unlck; rkey = be32_to_cpu(ateth->rkey); /* Check rkey & NAK */ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), vaddr, rkey, IB_ACCESS_REMOTE_ATOMIC))) goto nack_acc_unlck; /* Perform atomic OP and save result. */ maddr = (atomic64_t *)qp->r_sge.sge.vaddr; sdata = get_ib_ateth_swap(ateth); e->atomic_data = (opcode == OP(FETCH_ADD)) ? (u64)atomic64_add_return(sdata, maddr) - sdata : (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr, get_ib_ateth_compare(ateth), sdata); rvt_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; ack: e->opcode = opcode; e->sent = 0; e->psn = psn; e->lpsn = psn; qp->r_msn++; qp->r_psn++; qp->r_state = opcode; qp->r_nak_state = 0; qp->r_head_ack_queue = next; qpriv->r_tid_alloc = qp->r_head_ack_queue; /* Schedule the send engine. */ qp->s_flags |= RVT_S_RESP_PENDING; if (fecn) qp->s_flags |= RVT_S_ECN; hfi1_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); return; } default: /* NAK unknown opcodes. */ goto nack_inv; } qp->r_psn++; qp->r_state = opcode; qp->r_ack_psn = psn; qp->r_nak_state = 0; /* Send an ACK if requested or required. 
*/ if (psn & IB_BTH_REQ_ACK || fecn) { if (packet->numpkt == 0 || fecn || qp->r_adefered >= HFI1_PSN_CREDIT) { rc_cancel_ack(qp); goto send_ack; } qp->r_adefered++; rc_defered_ack(rcd, qp); } return; rnr_nak: qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK; qp->r_ack_psn = qp->r_psn; /* Queue RNR NAK for later */ rc_defered_ack(rcd, qp); return; nack_op_err: rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ rc_defered_ack(rcd, qp); return; nack_inv_unlck: spin_unlock_irqrestore(&qp->s_lock, flags); nack_inv: rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qp->r_nak_state = IB_NAK_INVALID_REQUEST; qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ rc_defered_ack(rcd, qp); return; nack_acc_unlck: spin_unlock_irqrestore(&qp->s_lock, flags); nack_acc: rvt_rc_error(qp, IB_WC_LOC_PROT_ERR); qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; qp->r_ack_psn = qp->r_psn; send_ack: hfi1_send_rc_ack(packet, fecn); } void hfi1_rc_hdrerr( struct hfi1_ctxtdata *rcd, struct hfi1_packet *packet, struct rvt_qp *qp) { struct hfi1_ibport *ibp = rcd_to_iport(rcd); int diff; u32 opcode; u32 psn; if (hfi1_ruc_check_hdr(ibp, packet)) return; psn = ib_bth_get_psn(packet->ohdr); opcode = ib_bth_get_opcode(packet->ohdr); /* Only deal with RDMA Writes for now */ if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) { diff = delta_psn(psn, qp->r_psn); if (!qp->r_nak_state && diff >= 0) { ibp->rvp.n_rc_seqnak++; qp->r_nak_state = IB_NAK_PSN_ERROR; /* Use the expected PSN. */ qp->r_ack_psn = qp->r_psn; /* * Wait to send the sequence * NAK until all packets * in the receive queue have * been processed. * Otherwise, we end up * propagating congestion. */ rc_defered_ack(rcd, qp); } /* Out of sequence NAK */ } /* QP Request NAKs */ }
linux-master
drivers/infiniband/hw/hfi1/rc.c
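Editor's note (not part of the kernel sources): the RC code in rc.c above leans heavily on the wraparound-safe PSN helpers cmp_psn(), delta_psn() and mask_psn(), which are defined in hfi1/rdmavt headers outside this excerpt. The standalone sketch below only illustrates the general technique; the 24-bit width, the PSN_MASK_DEMO value and the *_demo names are assumptions made for this illustration, and the in-kernel helpers may use a different width and a sign-extension shift instead of the explicit fold-over used here.

/* psn_demo.c - wraparound-safe sequence comparison, illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define PSN_BITS_DEMO	24
#define PSN_MASK_DEMO	((1u << PSN_BITS_DEMO) - 1)	/* 0x00FFFFFF */

/* Keep only the low-order PSN bits, as a BTH PSN field would. */
static uint32_t mask_psn_demo(uint32_t psn)
{
	return psn & PSN_MASK_DEMO;
}

/*
 * Signed distance a - b in the 24-bit PSN space: take the distance
 * modulo 2^24, then fold values with the top PSN bit set down into
 * the negative range, so PSNs that straddle the wrap point still
 * compare in the intended order.
 */
static int32_t delta_psn_demo(uint32_t a, uint32_t b)
{
	uint32_t d = (a - b) & PSN_MASK_DEMO;

	return (d & (1u << (PSN_BITS_DEMO - 1))) ?
		(int32_t)d - (1 << PSN_BITS_DEMO) : (int32_t)d;
}

/* cmp-style wrapper: < 0 if a is "before" b, 0 if equal, > 0 if "after". */
static int32_t cmp_psn_demo(uint32_t a, uint32_t b)
{
	return delta_psn_demo(a, b);
}

int main(void)
{
	uint32_t older = 0xFFFFFE;			/* just before the wrap */
	uint32_t newer = mask_psn_demo(older + 3);	/* wraps around to 0x000001 */

	/* Prints 3: newer really is three PSNs after older despite the wrap. */
	printf("delta(newer, older) = %d\n", (int)delta_psn_demo(newer, older));
	/* Prints -3: older correctly compares as "before" newer. */
	printf("cmp(older, newer)   = %d\n", (int)cmp_psn_demo(older, newer));
	return 0;
}

Because the PSN field wraps, a plain "<" comparison would misorder packets on either side of the wrap point; that is why the RC receive and ACK paths above go through cmp_psn()/delta_psn() rather than comparing PSNs directly.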
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. * Copyright(c) 2021 Cornelis Networks. */ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/xarray.h> #include <linux/module.h> #include <linux/printk.h> #include <linux/hrtimer.h> #include <linux/bitmap.h> #include <linux/numa.h> #include <rdma/rdma_vt.h> #include "hfi.h" #include "device.h" #include "common.h" #include "trace.h" #include "mad.h" #include "sdma.h" #include "debugfs.h" #include "verbs.h" #include "aspm.h" #include "affinity.h" #include "vnic.h" #include "exp_rcv.h" #include "netdev.h" #undef pr_fmt #define pr_fmt(fmt) DRIVER_NAME ": " fmt /* * min buffers we want to have per context, after driver */ #define HFI1_MIN_USER_CTXT_BUFCNT 7 #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */ #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */ #define NUM_IB_PORTS 1 /* * Number of user receive contexts we are configured to use (to allow for more * pio buffers per ctxt, etc.) Zero means use one user context per CPU. */ int num_user_contexts = -1; module_param_named(num_user_contexts, num_user_contexts, int, 0444); MODULE_PARM_DESC( num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)"); uint krcvqs[RXE_NUM_DATA_VL]; int krcvqsset; module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO); MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL"); /* computed based on above array */ unsigned long n_krcvqs; static unsigned hfi1_rcvarr_split = 25; module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO); MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers"); static uint eager_buffer_size = (8 << 20); /* 8MB */ module_param(eager_buffer_size, uint, S_IRUGO); MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB"); static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */ module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO); MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)"); static uint hfi1_hdrq_entsize = 32; module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444); MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)"); unsigned int user_credit_return_threshold = 33; /* default is 33% */ module_param(user_credit_return_threshold, uint, S_IRUGO); MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)"); DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); static int hfi1_create_kctxt(struct hfi1_devdata *dd, struct hfi1_pportdata *ppd) { struct hfi1_ctxtdata *rcd; int ret; /* Control context has to be always 0 */ BUILD_BUG_ON(HFI1_CTRL_CTXT != 0); ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd); if (ret < 0) { dd_dev_err(dd, "Kernel receive context allocation failed\n"); return ret; } /* * Set up the kernel context flags here and now because they use * default values for all receive side memories. User contexts will * be handled as they are created. 
*/ rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) | HFI1_CAP_KGET(NODROP_RHQ_FULL) | HFI1_CAP_KGET(NODROP_EGR_FULL) | HFI1_CAP_KGET(DMA_RTAIL); /* Control context must use DMA_RTAIL */ if (rcd->ctxt == HFI1_CTRL_CTXT) rcd->flags |= HFI1_CAP_DMA_RTAIL; rcd->fast_handler = get_dma_rtail_setting(rcd) ? handle_receive_interrupt_dma_rtail : handle_receive_interrupt_nodma_rtail; hfi1_set_seq_cnt(rcd, 1); rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); if (!rcd->sc) { dd_dev_err(dd, "Kernel send context allocation failed\n"); return -ENOMEM; } hfi1_init_ctxt(rcd->sc); return 0; } /* * Create the receive context array and one or more kernel contexts */ int hfi1_create_kctxts(struct hfi1_devdata *dd) { u16 i; int ret; dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd), GFP_KERNEL, dd->node); if (!dd->rcd) return -ENOMEM; for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { ret = hfi1_create_kctxt(dd, dd->pport); if (ret) goto bail; } return 0; bail: for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) hfi1_free_ctxt(dd->rcd[i]); /* All the contexts should be freed, free the array */ kfree(dd->rcd); dd->rcd = NULL; return ret; } /* * Helper routines for the receive context reference count (rcd and uctxt). */ static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd) { kref_init(&rcd->kref); } /** * hfi1_rcd_free - When reference is zero clean up. * @kref: pointer to an initialized rcd data structure * */ static void hfi1_rcd_free(struct kref *kref) { unsigned long flags; struct hfi1_ctxtdata *rcd = container_of(kref, struct hfi1_ctxtdata, kref); spin_lock_irqsave(&rcd->dd->uctxt_lock, flags); rcd->dd->rcd[rcd->ctxt] = NULL; spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags); hfi1_free_ctxtdata(rcd->dd, rcd); kfree(rcd); } /** * hfi1_rcd_put - decrement reference for rcd * @rcd: pointer to an initialized rcd data structure * * Use this to put a reference after the init. */ int hfi1_rcd_put(struct hfi1_ctxtdata *rcd) { if (rcd) return kref_put(&rcd->kref, hfi1_rcd_free); return 0; } /** * hfi1_rcd_get - increment reference for rcd * @rcd: pointer to an initialized rcd data structure * * Use this to get a reference after the init. * * Return : reflect kref_get_unless_zero(), which returns non-zero on * increment, otherwise 0. */ int hfi1_rcd_get(struct hfi1_ctxtdata *rcd) { return kref_get_unless_zero(&rcd->kref); } /** * allocate_rcd_index - allocate an rcd index from the rcd array * @dd: pointer to a valid devdata structure * @rcd: rcd data structure to assign * @index: pointer to index that is allocated * * Find an empty index in the rcd array, and assign the given rcd to it. * If the array is full, we are EBUSY. * */ static int allocate_rcd_index(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, u16 *index) { unsigned long flags; u16 ctxt; spin_lock_irqsave(&dd->uctxt_lock, flags); for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++) if (!dd->rcd[ctxt]) break; if (ctxt < dd->num_rcv_contexts) { rcd->ctxt = ctxt; dd->rcd[ctxt] = rcd; hfi1_rcd_init(rcd); } spin_unlock_irqrestore(&dd->uctxt_lock, flags); if (ctxt >= dd->num_rcv_contexts) return -EBUSY; *index = ctxt; return 0; } /** * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the * array * @dd: pointer to a valid devdata structure * @ctxt: the index of an possilbe rcd * * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given * ctxt index is valid. * * The caller is responsible for making the _put(). 
* */ struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd, u16 ctxt) { if (ctxt < dd->num_rcv_contexts) return hfi1_rcd_get_by_index(dd, ctxt); return NULL; } /** * hfi1_rcd_get_by_index - get by index * @dd: pointer to a valid devdata structure * @ctxt: the index of an possilbe rcd * * We need to protect access to the rcd array. If access is needed to * one or more index, get the protecting spinlock and then increment the * kref. * * The caller is responsible for making the _put(). * */ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt) { unsigned long flags; struct hfi1_ctxtdata *rcd = NULL; spin_lock_irqsave(&dd->uctxt_lock, flags); if (dd->rcd[ctxt]) { rcd = dd->rcd[ctxt]; if (!hfi1_rcd_get(rcd)) rcd = NULL; } spin_unlock_irqrestore(&dd->uctxt_lock, flags); return rcd; } /* * Common code for user and kernel context create and setup. * NOTE: the initial kref is done here (hf1_rcd_init()). */ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa, struct hfi1_ctxtdata **context) { struct hfi1_devdata *dd = ppd->dd; struct hfi1_ctxtdata *rcd; unsigned kctxt_ngroups = 0; u32 base; if (dd->rcv_entries.nctxt_extra > dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt) kctxt_ngroups = (dd->rcv_entries.nctxt_extra - (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)); rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa); if (rcd) { u32 rcvtids, max_entries; u16 ctxt; int ret; ret = allocate_rcd_index(dd, rcd, &ctxt); if (ret) { *context = NULL; kfree(rcd); return ret; } INIT_LIST_HEAD(&rcd->qp_wait_list); hfi1_exp_tid_group_init(rcd); rcd->ppd = ppd; rcd->dd = dd; rcd->numa_id = numa; rcd->rcv_array_groups = dd->rcv_entries.ngroups; rcd->rhf_rcv_function_map = normal_rhf_rcv_functions; rcd->slow_handler = handle_receive_interrupt; rcd->do_interrupt = rcd->slow_handler; rcd->msix_intr = CCE_NUM_MSIX_VECTORS; mutex_init(&rcd->exp_mutex); spin_lock_init(&rcd->exp_lock); INIT_LIST_HEAD(&rcd->flow_queue.queue_head); INIT_LIST_HEAD(&rcd->rarr_queue.queue_head); hfi1_cdbg(PROC, "setting up context %u", rcd->ctxt); /* * Calculate the context's RcvArray entry starting point. * We do this here because we have to take into account all * the RcvArray entries that previous context would have * taken and we have to account for any extra groups assigned * to the static (kernel) or dynamic (vnic/user) contexts. */ if (ctxt < dd->first_dyn_alloc_ctxt) { if (ctxt < kctxt_ngroups) { base = ctxt * (dd->rcv_entries.ngroups + 1); rcd->rcv_array_groups++; } else { base = kctxt_ngroups + (ctxt * dd->rcv_entries.ngroups); } } else { u16 ct = ctxt - dd->first_dyn_alloc_ctxt; base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) + kctxt_ngroups); if (ct < dd->rcv_entries.nctxt_extra) { base += ct * (dd->rcv_entries.ngroups + 1); rcd->rcv_array_groups++; } else { base += dd->rcv_entries.nctxt_extra + (ct * dd->rcv_entries.ngroups); } } rcd->eager_base = base * dd->rcv_entries.group_size; rcd->rcvhdrq_cnt = rcvhdrcnt; rcd->rcvhdrqentsize = hfi1_hdrq_entsize; rcd->rhf_offset = rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32); /* * Simple Eager buffer allocation: we have already pre-allocated * the number of RcvArray entry groups. Each ctxtdata structure * holds the number of groups for that context. * * To follow CSR requirements and maintain cacheline alignment, * make sure all sizes and bases are multiples of group_size. * * The expected entry count is what is left after assigning * eager. 
*/ max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; rcvtids = ((max_entries * hfi1_rcvarr_split) / 100); rcd->egrbufs.count = round_down(rcvtids, dd->rcv_entries.group_size); if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) { dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n", rcd->ctxt); rcd->egrbufs.count = MAX_EAGER_ENTRIES; } hfi1_cdbg(PROC, "ctxt%u: max Eager buffer RcvArray entries: %u", rcd->ctxt, rcd->egrbufs.count); /* * Allocate array that will hold the eager buffer accounting * data. * This will allocate the maximum possible buffer count based * on the value of the RcvArray split parameter. * The resulting value will be rounded down to the closest * multiple of dd->rcv_entries.group_size. */ rcd->egrbufs.buffers = kcalloc_node(rcd->egrbufs.count, sizeof(*rcd->egrbufs.buffers), GFP_KERNEL, numa); if (!rcd->egrbufs.buffers) goto bail; rcd->egrbufs.rcvtids = kcalloc_node(rcd->egrbufs.count, sizeof(*rcd->egrbufs.rcvtids), GFP_KERNEL, numa); if (!rcd->egrbufs.rcvtids) goto bail; rcd->egrbufs.size = eager_buffer_size; /* * The size of the buffers programmed into the RcvArray * entries needs to be big enough to handle the highest * MTU supported. */ if (rcd->egrbufs.size < hfi1_max_mtu) { rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu); hfi1_cdbg(PROC, "ctxt%u: eager bufs size too small. Adjusting to %u", rcd->ctxt, rcd->egrbufs.size); } rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE; /* Applicable only for statically created kernel contexts */ if (ctxt < dd->first_dyn_alloc_ctxt) { rcd->opstats = kzalloc_node(sizeof(*rcd->opstats), GFP_KERNEL, numa); if (!rcd->opstats) goto bail; /* Initialize TID flow generations for the context */ hfi1_kern_init_ctxt_generations(rcd); } *context = rcd; return 0; } bail: *context = NULL; hfi1_free_ctxt(rcd); return -ENOMEM; } /** * hfi1_free_ctxt - free context * @rcd: pointer to an initialized rcd data structure * * This wrapper is the free function that matches hfi1_create_ctxtdata(). * When a context is done being used (kernel or user), this function is called * for the "final" put to match the kref init from hfi1_create_ctxtdata(). * Other users of the context do a get/put sequence to make sure that the * structure isn't removed while in use. */ void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd) { hfi1_rcd_put(rcd); } /* * Select the largest ccti value over all SLs to determine the intra- * packet gap for the link. * * called with cca_timer_lock held (to protect access to cca_timer * array), and rcu_read_lock() (to protect access to cc_state). */ void set_link_ipg(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; struct cc_state *cc_state; int i; u16 cce, ccti_limit, max_ccti = 0; u16 shift, mult; u64 src; u32 current_egress_rate; /* Mbits /sec */ u64 max_pkt_time; /* * max_pkt_time is the maximum packet egress time in units * of the fabric clock period 1/(805 MHz). */ cc_state = get_cc_state(ppd); if (!cc_state) /* * This should _never_ happen - rcu_read_lock() is held, * and set_link_ipg() should not be called if cc_state * is NULL. 
*/ return; for (i = 0; i < OPA_MAX_SLS; i++) { u16 ccti = ppd->cca_timer[i].ccti; if (ccti > max_ccti) max_ccti = ccti; } ccti_limit = cc_state->cct.ccti_limit; if (max_ccti > ccti_limit) max_ccti = ccti_limit; cce = cc_state->cct.entries[max_ccti].entry; shift = (cce & 0xc000) >> 14; mult = (cce & 0x3fff); current_egress_rate = active_egress_rate(ppd); max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate); src = (max_pkt_time >> shift) * mult; src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK; src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT; write_csr(dd, SEND_STATIC_RATE_CONTROL, src); } static enum hrtimer_restart cca_timer_fn(struct hrtimer *t) { struct cca_timer *cca_timer; struct hfi1_pportdata *ppd; int sl; u16 ccti_timer, ccti_min; struct cc_state *cc_state; unsigned long flags; enum hrtimer_restart ret = HRTIMER_NORESTART; cca_timer = container_of(t, struct cca_timer, hrtimer); ppd = cca_timer->ppd; sl = cca_timer->sl; rcu_read_lock(); cc_state = get_cc_state(ppd); if (!cc_state) { rcu_read_unlock(); return HRTIMER_NORESTART; } /* * 1) decrement ccti for SL * 2) calculate IPG for link (set_link_ipg()) * 3) restart timer, unless ccti is at min value */ ccti_min = cc_state->cong_setting.entries[sl].ccti_min; ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer; spin_lock_irqsave(&ppd->cca_timer_lock, flags); if (cca_timer->ccti > ccti_min) { cca_timer->ccti--; set_link_ipg(ppd); } if (cca_timer->ccti > ccti_min) { unsigned long nsec = 1024 * ccti_timer; /* ccti_timer is in units of 1.024 usec */ hrtimer_forward_now(t, ns_to_ktime(nsec)); ret = HRTIMER_RESTART; } spin_unlock_irqrestore(&ppd->cca_timer_lock, flags); rcu_read_unlock(); return ret; } /* * Common code for initializing the physical port structure. */ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, struct hfi1_devdata *dd, u8 hw_pidx, u32 port) { int i; uint default_pkey_idx; struct cc_state *cc_state; ppd->dd = dd; ppd->hw_pidx = hw_pidx; ppd->port = port; /* IB port number, not index */ ppd->prev_link_width = LINK_WIDTH_DEFAULT; /* * There are C_VL_COUNT number of PortVLXmitWait counters. * Adding 1 to C_VL_COUNT to include the PortXmitWait counter. 
*/ for (i = 0; i < C_VL_COUNT + 1; i++) { ppd->port_vl_xmit_wait_last[i] = 0; ppd->vl_xmit_flit_cnt[i] = 0; } default_pkey_idx = 1; ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY; ppd->part_enforce |= HFI1_PART_ENFORCE_IN; ppd->pkeys[0] = 0x8001; INIT_WORK(&ppd->link_vc_work, handle_verify_cap); INIT_WORK(&ppd->link_up_work, handle_link_up); INIT_WORK(&ppd->link_down_work, handle_link_down); INIT_WORK(&ppd->freeze_work, handle_freeze); INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); INIT_WORK(&ppd->sma_message_work, handle_sma_message); INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link); INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); mutex_init(&ppd->hls_lock); spin_lock_init(&ppd->qsfp_info.qsfp_lock); ppd->qsfp_info.ppd = ppd; ppd->sm_trap_qp = 0x0; ppd->sa_qp = 0x1; ppd->hfi1_wq = NULL; spin_lock_init(&ppd->cca_timer_lock); for (i = 0; i < OPA_MAX_SLS; i++) { hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ppd->cca_timer[i].ppd = ppd; ppd->cca_timer[i].sl = i; ppd->cca_timer[i].ccti = 0; ppd->cca_timer[i].hrtimer.function = cca_timer_fn; } ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT; spin_lock_init(&ppd->cc_state_lock); spin_lock_init(&ppd->cc_log_lock); cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL); RCU_INIT_POINTER(ppd->cc_state, cc_state); if (!cc_state) goto bail; return; bail: dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port); } /* * Do initialization for device that is only needed on * first detect, not on resets. */ static int loadtime_init(struct hfi1_devdata *dd) { return 0; } /** * init_after_reset - re-initialize after a reset * @dd: the hfi1_ib device * * sanity check at least some of the values after reset, and * ensure no receive or transmit (explicitly, in case reset * failed */ static int init_after_reset(struct hfi1_devdata *dd) { int i; struct hfi1_ctxtdata *rcd; /* * Ensure chip does no sends or receives, tail updates, or * pioavail updates while we re-initialize. This is mostly * for the driver data structures, not chip registers. */ for (i = 0; i < dd->num_rcv_contexts; i++) { rcd = hfi1_rcd_get_by_index(dd, i); hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS | HFI1_RCVCTRL_TAILUPD_DIS, rcd); hfi1_rcd_put(rcd); } pio_send_control(dd, PSC_GLOBAL_DISABLE); for (i = 0; i < dd->num_send_contexts; i++) sc_disable(dd->send_contexts[i].sc); return 0; } static void enable_chip(struct hfi1_devdata *dd) { struct hfi1_ctxtdata *rcd; u32 rcvmask; u16 i; /* enable PIO send */ pio_send_control(dd, PSC_GLOBAL_ENABLE); /* * Enable kernel ctxts' receive and receive interrupt. * Other ctxts done as user opens and initializes them. */ for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { rcd = hfi1_rcd_get_by_index(dd, i); if (!rcd) continue; rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB; rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ? 
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB; if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL)) rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB; if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL)) rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB; if (HFI1_CAP_IS_KSET(TID_RDMA)) rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB; hfi1_rcvctrl(dd, rcvmask, rcd); sc_enable(rcd->sc); hfi1_rcd_put(rcd); } } /** * create_workqueues - create per port workqueues * @dd: the hfi1_ib device */ static int create_workqueues(struct hfi1_devdata *dd) { int pidx; struct hfi1_pportdata *ppd; for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (!ppd->hfi1_wq) { ppd->hfi1_wq = alloc_workqueue( "hfi%d_%d", WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES, dd->unit, pidx); if (!ppd->hfi1_wq) goto wq_error; } if (!ppd->link_wq) { /* * Make the link workqueue single-threaded to enforce * serialization. */ ppd->link_wq = alloc_workqueue( "hfi_link_%d_%d", WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 1, /* max_active */ dd->unit, pidx); if (!ppd->link_wq) goto wq_error; } } return 0; wq_error: pr_err("alloc_workqueue failed for port %d\n", pidx + 1); for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (ppd->hfi1_wq) { destroy_workqueue(ppd->hfi1_wq); ppd->hfi1_wq = NULL; } if (ppd->link_wq) { destroy_workqueue(ppd->link_wq); ppd->link_wq = NULL; } } return -ENOMEM; } /** * destroy_workqueues - destroy per port workqueues * @dd: the hfi1_ib device */ static void destroy_workqueues(struct hfi1_devdata *dd) { int pidx; struct hfi1_pportdata *ppd; for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (ppd->hfi1_wq) { destroy_workqueue(ppd->hfi1_wq); ppd->hfi1_wq = NULL; } if (ppd->link_wq) { destroy_workqueue(ppd->link_wq); ppd->link_wq = NULL; } } } /** * enable_general_intr() - Enable the IRQs that will be handled by the * general interrupt handler. * @dd: valid devdata * */ static void enable_general_intr(struct hfi1_devdata *dd) { set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true); set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true); set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true); set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true); set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true); set_intr_bits(dd, IS_DC_START, IS_DC_END, true); set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true); } /** * hfi1_init - do the actual initialization sequence on the chip * @dd: the hfi1_ib device * @reinit: re-initializing, so don't allocate new memory * * Do the actual initialization sequence on the chip. This is done * both from the init routine called from the PCI infrastructure, and * when we reset the chip, or detect that it was reset internally, * or it's administratively re-enabled. * * Memory allocation here and in called routines is only done in * the first case (reinit == 0). We have to be careful, because even * without memory allocation, we need to re-write all the chip registers * TIDs, etc. after the reset or enable has completed. 
*/ int hfi1_init(struct hfi1_devdata *dd, int reinit) { int ret = 0, pidx, lastfail = 0; unsigned long len; u16 i; struct hfi1_ctxtdata *rcd; struct hfi1_pportdata *ppd; /* Set up send low level handlers */ dd->process_pio_send = hfi1_verbs_send_pio; dd->process_dma_send = hfi1_verbs_send_dma; dd->pio_inline_send = pio_copy; dd->process_vnic_dma_send = hfi1_vnic_send_dma; if (is_ax(dd)) { atomic_set(&dd->drop_packet, DROP_PACKET_ON); dd->do_drop = true; } else { atomic_set(&dd->drop_packet, DROP_PACKET_OFF); dd->do_drop = false; } /* make sure the link is not "up" */ for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; ppd->linkup = 0; } if (reinit) ret = init_after_reset(dd); else ret = loadtime_init(dd); if (ret) goto done; /* dd->rcd can be NULL if early initialization failed */ for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) { /* * Set up the (kernel) rcvhdr queue and egr TIDs. If doing * re-init, the simplest way to handle this is to free * existing, and re-allocate. * Need to re-create rest of ctxt 0 ctxtdata as well. */ rcd = hfi1_rcd_get_by_index(dd, i); if (!rcd) continue; lastfail = hfi1_create_rcvhdrq(dd, rcd); if (!lastfail) lastfail = hfi1_setup_eagerbufs(rcd); if (!lastfail) lastfail = hfi1_kern_exp_rcv_init(rcd, reinit); if (lastfail) { dd_dev_err(dd, "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); ret = lastfail; } /* enable IRQ */ hfi1_rcd_put(rcd); } /* Allocate enough memory for user event notification. */ len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS * sizeof(*dd->events)); dd->events = vmalloc_user(len); if (!dd->events) dd_dev_err(dd, "Failed to allocate user events page\n"); /* * Allocate a page for device and port status. * Page will be shared amongst all user processes. */ dd->status = vmalloc_user(PAGE_SIZE); if (!dd->status) dd_dev_err(dd, "Failed to allocate dev status page\n"); for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (dd->status) /* Currently, we only have one port */ ppd->statusp = &dd->status->port; set_mtu(ppd); } /* enable chip even if we have an error, so we can debug cause */ enable_chip(dd); done: /* * Set status even if port serdes is not initialized * so that diags will work. */ if (dd->status) dd->status->dev |= HFI1_STATUS_CHIP_PRESENT | HFI1_STATUS_INITTED; if (!ret) { /* enable all interrupts from the chip */ enable_general_intr(dd); init_qsfp_int(dd); /* chip is OK for user apps; mark it as initialized */ for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; /* * start the serdes - must be after interrupts are * enabled so we are notified when the link goes up */ lastfail = bringup_serdes(ppd); if (lastfail) dd_dev_info(dd, "Failed to bring up port %u\n", ppd->port); /* * Set status even if port serdes is not initialized * so that diags will work. */ if (ppd->statusp) *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT | HFI1_STATUS_INITTED; if (!ppd->link_speed_enabled) continue; } } /* if ret is non-zero, we probably should do some cleanup here... */ return ret; } struct hfi1_devdata *hfi1_lookup(int unit) { return xa_load(&hfi1_dev_table, unit); } /* * Stop the timers during unit shutdown, or after an error late * in initialization. 
*/ static void stop_timers(struct hfi1_devdata *dd) { struct hfi1_pportdata *ppd; int pidx; for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (ppd->led_override_timer.function) { del_timer_sync(&ppd->led_override_timer); atomic_set(&ppd->led_override_timer_active, 0); } } } /** * shutdown_device - shut down a device * @dd: the hfi1_ib device * * This is called to make the device quiet when we are about to * unload the driver, and also when the device is administratively * disabled. It does not free any data structures. * Everything it does has to be setup again by hfi1_init(dd, 1) */ static void shutdown_device(struct hfi1_devdata *dd) { struct hfi1_pportdata *ppd; struct hfi1_ctxtdata *rcd; unsigned pidx; int i; if (dd->flags & HFI1_SHUTDOWN) return; dd->flags |= HFI1_SHUTDOWN; for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; ppd->linkup = 0; if (ppd->statusp) *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | HFI1_STATUS_IB_READY); } dd->flags &= ~HFI1_INITTED; /* mask and clean up interrupts */ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); msix_clean_up_interrupts(dd); for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; for (i = 0; i < dd->num_rcv_contexts; i++) { rcd = hfi1_rcd_get_by_index(dd, i); hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS | HFI1_RCVCTRL_PKEY_DIS | HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd); hfi1_rcd_put(rcd); } /* * Gracefully stop all sends allowing any in progress to * trickle out first. */ for (i = 0; i < dd->num_send_contexts; i++) sc_flush(dd->send_contexts[i].sc); } /* * Enough for anything that's going to trickle out to have actually * done so. */ udelay(20); for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; /* disable all contexts */ for (i = 0; i < dd->num_send_contexts; i++) sc_disable(dd->send_contexts[i].sc); /* disable the send device */ pio_send_control(dd, PSC_GLOBAL_DISABLE); shutdown_led_override(ppd); /* * Clear SerdesEnable. * We can't count on interrupts since we are stopping. */ hfi1_quiet_serdes(ppd); if (ppd->hfi1_wq) flush_workqueue(ppd->hfi1_wq); if (ppd->link_wq) flush_workqueue(ppd->link_wq); } sdma_exit(dd); } /** * hfi1_free_ctxtdata - free a context's allocated data * @dd: the hfi1_ib device * @rcd: the ctxtdata structure * * free up any allocated data for a context * It should never change any chip state, or global driver state. 
*/ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) { u32 e; if (!rcd) return; if (rcd->rcvhdrq) { dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd), rcd->rcvhdrq, rcd->rcvhdrq_dma); rcd->rcvhdrq = NULL; if (hfi1_rcvhdrtail_kvaddr(rcd)) { dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, (void *)hfi1_rcvhdrtail_kvaddr(rcd), rcd->rcvhdrqtailaddr_dma); rcd->rcvhdrtail_kvaddr = NULL; } } /* all the RcvArray entries should have been cleared by now */ kfree(rcd->egrbufs.rcvtids); rcd->egrbufs.rcvtids = NULL; for (e = 0; e < rcd->egrbufs.alloced; e++) { if (rcd->egrbufs.buffers[e].addr) dma_free_coherent(&dd->pcidev->dev, rcd->egrbufs.buffers[e].len, rcd->egrbufs.buffers[e].addr, rcd->egrbufs.buffers[e].dma); } kfree(rcd->egrbufs.buffers); rcd->egrbufs.alloced = 0; rcd->egrbufs.buffers = NULL; sc_free(rcd->sc); rcd->sc = NULL; vfree(rcd->subctxt_uregbase); vfree(rcd->subctxt_rcvegrbuf); vfree(rcd->subctxt_rcvhdr_base); kfree(rcd->opstats); rcd->subctxt_uregbase = NULL; rcd->subctxt_rcvegrbuf = NULL; rcd->subctxt_rcvhdr_base = NULL; rcd->opstats = NULL; } /* * Release our hold on the shared asic data. If we are the last one, * return the structure to be finalized outside the lock. Must be * holding hfi1_dev_table lock. */ static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd) { struct hfi1_asic_data *ad; int other; if (!dd->asic_data) return NULL; dd->asic_data->dds[dd->hfi1_id] = NULL; other = dd->hfi1_id ? 0 : 1; ad = dd->asic_data; dd->asic_data = NULL; /* return NULL if the other dd still has a link */ return ad->dds[other] ? NULL : ad; } static void finalize_asic_data(struct hfi1_devdata *dd, struct hfi1_asic_data *ad) { clean_up_i2c(dd, ad); kfree(ad); } /** * hfi1_free_devdata - cleans up and frees per-unit data structure * @dd: pointer to a valid devdata structure * * It cleans up and frees all data structures set up by * by hfi1_alloc_devdata(). */ void hfi1_free_devdata(struct hfi1_devdata *dd) { struct hfi1_asic_data *ad; unsigned long flags; xa_lock_irqsave(&hfi1_dev_table, flags); __xa_erase(&hfi1_dev_table, dd->unit); ad = release_asic_data(dd); xa_unlock_irqrestore(&hfi1_dev_table, flags); finalize_asic_data(dd, ad); free_platform_config(dd); rcu_barrier(); /* wait for rcu callbacks to complete */ free_percpu(dd->int_counter); free_percpu(dd->rcv_limit); free_percpu(dd->send_schedule); free_percpu(dd->tx_opstats); dd->int_counter = NULL; dd->rcv_limit = NULL; dd->send_schedule = NULL; dd->tx_opstats = NULL; kfree(dd->comp_vect); dd->comp_vect = NULL; if (dd->rcvhdrtail_dummy_kvaddr) dma_free_coherent(&dd->pcidev->dev, sizeof(u64), (void *)dd->rcvhdrtail_dummy_kvaddr, dd->rcvhdrtail_dummy_dma); dd->rcvhdrtail_dummy_kvaddr = NULL; sdma_clean(dd, dd->num_sdma); rvt_dealloc_device(&dd->verbs_dev.rdi); } /** * hfi1_alloc_devdata - Allocate our primary per-unit data structure. * @pdev: Valid PCI device * @extra: How many bytes to alloc past the default * * Must be done via verbs allocator, because the verbs cleanup process * both does cleanup and free of the data structure. * "extra" is for chip-specific data. 
*/ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) { struct hfi1_devdata *dd; int ret, nports; /* extra is * number of ports */ nports = extra / sizeof(struct hfi1_pportdata); dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra, nports); if (!dd) return ERR_PTR(-ENOMEM); dd->num_pports = nports; dd->pport = (struct hfi1_pportdata *)(dd + 1); dd->pcidev = pdev; pci_set_drvdata(pdev, dd); ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b, GFP_KERNEL); if (ret < 0) { dev_err(&pdev->dev, "Could not allocate unit ID: error %d\n", -ret); goto bail; } rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); /* * If the BIOS does not have the NUMA node information set, select * NUMA 0 so we get consistent performance. */ dd->node = pcibus_to_node(pdev->bus); if (dd->node == NUMA_NO_NODE) { dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n"); dd->node = 0; } /* * Initialize all locks for the device. This needs to be as early as * possible so locks are usable. */ spin_lock_init(&dd->sc_lock); spin_lock_init(&dd->sendctrl_lock); spin_lock_init(&dd->rcvctrl_lock); spin_lock_init(&dd->uctxt_lock); spin_lock_init(&dd->hfi1_diag_trans_lock); spin_lock_init(&dd->sc_init_lock); spin_lock_init(&dd->dc8051_memlock); seqlock_init(&dd->sc2vl_lock); spin_lock_init(&dd->sde_map_lock); spin_lock_init(&dd->pio_map_lock); mutex_init(&dd->dc8051_lock); init_waitqueue_head(&dd->event_queue); spin_lock_init(&dd->irq_src_lock); dd->int_counter = alloc_percpu(u64); if (!dd->int_counter) { ret = -ENOMEM; goto bail; } dd->rcv_limit = alloc_percpu(u64); if (!dd->rcv_limit) { ret = -ENOMEM; goto bail; } dd->send_schedule = alloc_percpu(u64); if (!dd->send_schedule) { ret = -ENOMEM; goto bail; } dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx); if (!dd->tx_opstats) { ret = -ENOMEM; goto bail; } dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL); if (!dd->comp_vect) { ret = -ENOMEM; goto bail; } /* allocate dummy tail memory for all receive contexts */ dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64), &dd->rcvhdrtail_dummy_dma, GFP_KERNEL); if (!dd->rcvhdrtail_dummy_kvaddr) { ret = -ENOMEM; goto bail; } atomic_set(&dd->ipoib_rsm_usr_num, 0); return dd; bail: hfi1_free_devdata(dd); return ERR_PTR(ret); } /* * Called from freeze mode handlers, and from PCI error * reporting code. Should be paranoid about state of * system and data structures. */ void hfi1_disable_after_error(struct hfi1_devdata *dd) { if (dd->flags & HFI1_INITTED) { u32 pidx; dd->flags &= ~HFI1_INITTED; if (dd->pport) for (pidx = 0; pidx < dd->num_pports; ++pidx) { struct hfi1_pportdata *ppd; ppd = dd->pport + pidx; if (dd->flags & HFI1_PRESENT) set_link_state(ppd, HLS_DN_DISABLE); if (ppd->statusp) *ppd->statusp &= ~HFI1_STATUS_IB_READY; } } /* * Mark as having had an error for driver, and also * for /sys and status word mapped to user programs. * This marks unit as not usable, until reset. 
*/ if (dd->status) dd->status->dev |= HFI1_STATUS_HWERROR; } static void remove_one(struct pci_dev *); static int init_one(struct pci_dev *, const struct pci_device_id *); static void shutdown_one(struct pci_dev *); #define DRIVER_LOAD_MSG "Cornelis " DRIVER_NAME " loaded: " #define PFX DRIVER_NAME ": " const struct pci_device_id hfi1_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) }, { 0, } }; MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl); static struct pci_driver hfi1_pci_driver = { .name = DRIVER_NAME, .probe = init_one, .remove = remove_one, .shutdown = shutdown_one, .id_table = hfi1_pci_tbl, .err_handler = &hfi1_pci_err_handler, }; static void __init compute_krcvqs(void) { int i; for (i = 0; i < krcvqsset; i++) n_krcvqs += krcvqs[i]; } /* * Do all the generic driver unit- and chip-independent memory * allocation and initialization. */ static int __init hfi1_mod_init(void) { int ret; ret = dev_init(); if (ret) goto bail; ret = node_affinity_init(); if (ret) goto bail; /* validate max MTU before any devices start */ if (!valid_opa_max_mtu(hfi1_max_mtu)) { pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n", hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU); hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU; } /* valid CUs run from 1-128 in powers of 2 */ if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu)) hfi1_cu = 1; /* valid credit return threshold is 0-100, variable is unsigned */ if (user_credit_return_threshold > 100) user_credit_return_threshold = 100; compute_krcvqs(); /* * sanitize receive interrupt count, time must wait until after * the hardware type is known */ if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK) rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK; /* reject invalid combinations */ if (rcv_intr_count == 0 && rcv_intr_timeout == 0) { pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n"); rcv_intr_count = 1; } if (rcv_intr_count > 1 && rcv_intr_timeout == 0) { /* * Avoid indefinite packet delivery by requiring a timeout * if count is > 1. */ pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n"); rcv_intr_timeout = 1; } if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) { /* * The dynamic algorithm expects a non-zero timeout * and a count > 1. */ pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n"); rcv_intr_dynamic = 0; } /* sanitize link CRC options */ link_crc_mask &= SUPPORTED_CRCS; ret = opfn_init(); if (ret < 0) { pr_err("Failed to allocate opfn_wq"); goto bail_dev; } /* * These must be called before the driver is registered with * the PCI subsystem. */ hfi1_dbg_init(); ret = pci_register_driver(&hfi1_pci_driver); if (ret < 0) { pr_err("Unable to register driver: error %d\n", -ret); goto bail_dev; } goto bail; /* all OK */ bail_dev: hfi1_dbg_exit(); dev_cleanup(); bail: return ret; } module_init(hfi1_mod_init); /* * Do the non-unit driver cleanup, memory free, etc. at unload. 
*/ static void __exit hfi1_mod_cleanup(void) { pci_unregister_driver(&hfi1_pci_driver); opfn_exit(); node_affinity_destroy_all(); hfi1_dbg_exit(); WARN_ON(!xa_empty(&hfi1_dev_table)); dispose_firmware(); /* asymmetric with obtain_firmware() */ dev_cleanup(); } module_exit(hfi1_mod_cleanup); /* this can only be called after a successful initialization */ static void cleanup_device_data(struct hfi1_devdata *dd) { int ctxt; int pidx; /* users can't do anything more with chip */ for (pidx = 0; pidx < dd->num_pports; ++pidx) { struct hfi1_pportdata *ppd = &dd->pport[pidx]; struct cc_state *cc_state; int i; if (ppd->statusp) *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT; for (i = 0; i < OPA_MAX_SLS; i++) hrtimer_cancel(&ppd->cca_timer[i].hrtimer); spin_lock(&ppd->cc_state_lock); cc_state = get_cc_state_protected(ppd); RCU_INIT_POINTER(ppd->cc_state, NULL); spin_unlock(&ppd->cc_state_lock); if (cc_state) kfree_rcu(cc_state, rcu); } free_credit_return(dd); /* * Free any resources still in use (usually just kernel contexts) * at unload; we do for ctxtcnt, because that's what we allocate. */ for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) { struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; if (rcd) { hfi1_free_ctxt_rcv_groups(rcd); hfi1_free_ctxt(rcd); } } kfree(dd->rcd); dd->rcd = NULL; free_pio_map(dd); /* must follow rcv context free - need to remove rcv's hooks */ for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) sc_free(dd->send_contexts[ctxt].sc); dd->num_send_contexts = 0; kfree(dd->send_contexts); dd->send_contexts = NULL; kfree(dd->hw_to_sw); dd->hw_to_sw = NULL; kfree(dd->boardname); vfree(dd->events); vfree(dd->status); } /* * Clean up on unit shutdown, or error during unit load after * successful initialization. */ static void postinit_cleanup(struct hfi1_devdata *dd) { hfi1_start_cleanup(dd); hfi1_comp_vectors_clean_up(dd); hfi1_dev_affinity_clean_up(dd); hfi1_pcie_ddcleanup(dd); hfi1_pcie_cleanup(dd->pcidev); cleanup_device_data(dd); hfi1_free_devdata(dd); } static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = 0, j, pidx, initfail; struct hfi1_devdata *dd; struct hfi1_pportdata *ppd; /* First, lock the non-writable module parameters */ HFI1_CAP_LOCK(); /* Validate dev ids */ if (!(ent->device == PCI_DEVICE_ID_INTEL0 || ent->device == PCI_DEVICE_ID_INTEL1)) { dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n", ent->device); ret = -ENODEV; goto bail; } /* Allocate the dd so we can get to work */ dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS * sizeof(struct hfi1_pportdata)); if (IS_ERR(dd)) { ret = PTR_ERR(dd); goto bail; } /* Validate some global module parameters */ ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt); if (ret) goto bail; /* use the encoding function as a sanitization check */ if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { dd_dev_err(dd, "Invalid HdrQ Entry size %u\n", hfi1_hdrq_entsize); ret = -EINVAL; goto bail; } /* The receive eager buffer size must be set before the receive * contexts are created. * * Set the eager buffer size. Validate that it falls in a range * allowed by the hardware - all powers of 2 between the min and * max. The maximum valid MTU is within the eager buffer range * so we do not need to cap the max_mtu by an eager buffer size * setting. 
*/ if (eager_buffer_size) { if (!is_power_of_2(eager_buffer_size)) eager_buffer_size = roundup_pow_of_two(eager_buffer_size); eager_buffer_size = clamp_val(eager_buffer_size, MIN_EAGER_BUFFER * 8, MAX_EAGER_BUFFER_TOTAL); dd_dev_info(dd, "Eager buffer size %u\n", eager_buffer_size); } else { dd_dev_err(dd, "Invalid Eager buffer size of 0\n"); ret = -EINVAL; goto bail; } /* restrict value of hfi1_rcvarr_split */ hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100); ret = hfi1_pcie_init(dd); if (ret) goto bail; /* * Do device-specific initialization, function table setup, dd * allocation, etc. */ ret = hfi1_init_dd(dd); if (ret) goto clean_bail; /* error already printed */ ret = create_workqueues(dd); if (ret) goto clean_bail; /* do the generic initialization */ initfail = hfi1_init(dd, 0); ret = hfi1_register_ib_device(dd); /* * Now ready for use. this should be cleared whenever we * detect a reset, or initiate one. If earlier failure, * we still create devices, so diags, etc. can be used * to determine cause of problem. */ if (!initfail && !ret) { dd->flags |= HFI1_INITTED; /* create debufs files after init and ib register */ hfi1_dbg_ibdev_init(&dd->verbs_dev); } j = hfi1_device_create(dd); if (j) dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); if (initfail || ret) { msix_clean_up_interrupts(dd); stop_timers(dd); flush_workqueue(ib_wq); for (pidx = 0; pidx < dd->num_pports; ++pidx) { hfi1_quiet_serdes(dd->pport + pidx); ppd = dd->pport + pidx; if (ppd->hfi1_wq) { destroy_workqueue(ppd->hfi1_wq); ppd->hfi1_wq = NULL; } if (ppd->link_wq) { destroy_workqueue(ppd->link_wq); ppd->link_wq = NULL; } } if (!j) hfi1_device_remove(dd); if (!ret) hfi1_unregister_ib_device(dd); postinit_cleanup(dd); if (initfail) ret = initfail; goto bail; /* everything already cleaned */ } sdma_start(dd); return 0; clean_bail: hfi1_pcie_cleanup(pdev); bail: return ret; } static void wait_for_clients(struct hfi1_devdata *dd) { /* * Remove the device init value and complete the device if there is * no clients or wait for active clients to finish. */ if (refcount_dec_and_test(&dd->user_refcount)) complete(&dd->user_comp); wait_for_completion(&dd->user_comp); } static void remove_one(struct pci_dev *pdev) { struct hfi1_devdata *dd = pci_get_drvdata(pdev); /* close debugfs files before ib unregister */ hfi1_dbg_ibdev_exit(&dd->verbs_dev); /* remove the /dev hfi1 interface */ hfi1_device_remove(dd); /* wait for existing user space clients to finish */ wait_for_clients(dd); /* unregister from IB core */ hfi1_unregister_ib_device(dd); /* free netdev data */ hfi1_free_rx(dd); /* * Disable the IB link, disable interrupts on the device, * clear dma engines, etc. */ shutdown_device(dd); destroy_workqueues(dd); stop_timers(dd); /* wait until all of our (qsfp) queue_work() calls complete */ flush_workqueue(ib_wq); postinit_cleanup(dd); } static void shutdown_one(struct pci_dev *pdev) { struct hfi1_devdata *dd = pci_get_drvdata(pdev); shutdown_device(dd); } /** * hfi1_create_rcvhdrq - create a receive header queue * @dd: the hfi1_ib device * @rcd: the context data * * This must be contiguous memory (from an i/o perspective), and must be * DMA'able (which means for some systems, it will go through an IOMMU, * or be forced into a low address range). 
*/ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) { unsigned amt; if (!rcd->rcvhdrq) { amt = rcvhdrq_size(rcd); rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, GFP_KERNEL); if (!rcd->rcvhdrq) { dd_dev_err(dd, "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", amt, rcd->ctxt); goto bail; } if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, PAGE_SIZE, &rcd->rcvhdrqtailaddr_dma, GFP_KERNEL); if (!rcd->rcvhdrtail_kvaddr) goto bail_free; } } set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize, rcd->rcvhdrq_cnt); return 0; bail_free: dd_dev_err(dd, "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", rcd->ctxt); dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, rcd->rcvhdrq_dma); rcd->rcvhdrq = NULL; bail: return -ENOMEM; } /** * hfi1_setup_eagerbufs - llocate eager buffers, both kernel and user * contexts. * @rcd: the context we are setting up. * * Allocate the eager TID buffers and program them into hip. * They are no longer completely contiguous, we do multiple allocation * calls. Otherwise we get the OOM code involved, by asking for too * much per call, with disastrous results on some kernels. */ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) { struct hfi1_devdata *dd = rcd->dd; u32 max_entries, egrtop, alloced_bytes = 0; u16 order, idx = 0; int ret = 0; u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu); /* * The minimum size of the eager buffers is a groups of MTU-sized * buffers. * The global eager_buffer_size parameter is checked against the * theoretical lower limit of the value. Here, we check against the * MTU. */ if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size)) rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size; /* * If using one-pkt-per-egr-buffer, lower the eager buffer * size to the max MTU (page-aligned). */ if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) rcd->egrbufs.rcvtid_size = round_mtu; /* * Eager buffers sizes of 1MB or less require smaller TID sizes * to satisfy the "multiple of 8 RcvArray entries" requirement. */ if (rcd->egrbufs.size <= (1 << 20)) rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu, rounddown_pow_of_two(rcd->egrbufs.size / 8)); while (alloced_bytes < rcd->egrbufs.size && rcd->egrbufs.alloced < rcd->egrbufs.count) { rcd->egrbufs.buffers[idx].addr = dma_alloc_coherent(&dd->pcidev->dev, rcd->egrbufs.rcvtid_size, &rcd->egrbufs.buffers[idx].dma, GFP_KERNEL); if (rcd->egrbufs.buffers[idx].addr) { rcd->egrbufs.buffers[idx].len = rcd->egrbufs.rcvtid_size; rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr = rcd->egrbufs.buffers[idx].addr; rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma = rcd->egrbufs.buffers[idx].dma; rcd->egrbufs.alloced++; alloced_bytes += rcd->egrbufs.rcvtid_size; idx++; } else { u32 new_size, i, j; u64 offset = 0; /* * Fail the eager buffer allocation if: * - we are already using the lowest acceptable size * - we are using one-pkt-per-egr-buffer (this implies * that we are accepting only one size) */ if (rcd->egrbufs.rcvtid_size == round_mtu || !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) { dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", rcd->ctxt); ret = -ENOMEM; goto bail_rcvegrbuf_phys; } new_size = rcd->egrbufs.rcvtid_size / 2; /* * If the first attempt to allocate memory failed, don't * fail everything but continue with the next lower * size. 
*/ if (idx == 0) { rcd->egrbufs.rcvtid_size = new_size; continue; } /* * Re-partition already allocated buffers to a smaller * size. */ rcd->egrbufs.alloced = 0; for (i = 0, j = 0, offset = 0; j < idx; i++) { if (i >= rcd->egrbufs.count) break; rcd->egrbufs.rcvtids[i].dma = rcd->egrbufs.buffers[j].dma + offset; rcd->egrbufs.rcvtids[i].addr = rcd->egrbufs.buffers[j].addr + offset; rcd->egrbufs.alloced++; if ((rcd->egrbufs.buffers[j].dma + offset + new_size) == (rcd->egrbufs.buffers[j].dma + rcd->egrbufs.buffers[j].len)) { j++; offset = 0; } else { offset += new_size; } } rcd->egrbufs.rcvtid_size = new_size; } } rcd->egrbufs.numbufs = idx; rcd->egrbufs.size = alloced_bytes; hfi1_cdbg(PROC, "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB", rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024); /* * Set the contexts rcv array head update threshold to the closest * power of 2 (so we can use a mask instead of modulo) below half * the allocated entries. */ rcd->egrbufs.threshold = rounddown_pow_of_two(rcd->egrbufs.alloced / 2); /* * Compute the expected RcvArray entry base. This is done after * allocating the eager buffers in order to maximize the * expected RcvArray entries for the context. */ max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size); rcd->expected_count = max_entries - egrtop; if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2) rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2; rcd->expected_base = rcd->eager_base + egrtop; hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u", rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count, rcd->eager_base, rcd->expected_base); if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) { hfi1_cdbg(PROC, "ctxt%u: current Eager buffer size is invalid %u", rcd->ctxt, rcd->egrbufs.rcvtid_size); ret = -EINVAL; goto bail_rcvegrbuf_phys; } for (idx = 0; idx < rcd->egrbufs.alloced; idx++) { hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER, rcd->egrbufs.rcvtids[idx].dma, order); cond_resched(); } return 0; bail_rcvegrbuf_phys: for (idx = 0; idx < rcd->egrbufs.alloced && rcd->egrbufs.buffers[idx].addr; idx++) { dma_free_coherent(&dd->pcidev->dev, rcd->egrbufs.buffers[idx].len, rcd->egrbufs.buffers[idx].addr, rcd->egrbufs.buffers[idx].dma); rcd->egrbufs.buffers[idx].addr = NULL; rcd->egrbufs.buffers[idx].dma = 0; rcd->egrbufs.buffers[idx].len = 0; } return ret; }
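/*
 * Illustrative, hedged sketch (not part of the driver): a minimal user-space
 * analogue of the kref-protected context lookup implemented above by
 * hfi1_rcd_get_by_index(), hfi1_rcd_put() and hfi1_rcd_free(). All names here
 * (ctx_table, ctx_get_by_index, ctx_put, CTX_SLOTS, ...) are invented for the
 * example. The point is only the pattern: take the table lock, grab a
 * reference only while the count is still non-zero, and let the final put
 * clear the table slot and free the object. Builds with `gcc -pthread`.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define CTX_SLOTS 8

struct ctx {
	unsigned int index;
	atomic_uint refcount;		/* starts at 1, like kref_init() */
};

static struct ctx *ctx_table[CTX_SLOTS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* kref_get_unless_zero() analogue: only succeed while the object is live. */
static int ctx_get_unless_zero(struct ctx *c)
{
	unsigned int old = atomic_load(&c->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&c->refcount, &old, old + 1))
			return 1;
	return 0;
}

/* Final put clears the slot under the lock, then frees (like hfi1_rcd_free()). */
static void ctx_put(struct ctx *c)
{
	if (atomic_fetch_sub(&c->refcount, 1) == 1) {
		pthread_mutex_lock(&table_lock);
		ctx_table[c->index] = NULL;
		pthread_mutex_unlock(&table_lock);
		free(c);
	}
}

/* hfi1_rcd_get_by_index() analogue: look up and reference under the lock. */
static struct ctx *ctx_get_by_index(unsigned int index)
{
	struct ctx *c = NULL;

	if (index >= CTX_SLOTS)
		return NULL;
	pthread_mutex_lock(&table_lock);
	if (ctx_table[index] && ctx_get_unless_zero(ctx_table[index]))
		c = ctx_table[index];
	pthread_mutex_unlock(&table_lock);
	return c;
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));
	struct ctx *ref;

	c->index = 0;
	atomic_store(&c->refcount, 1);
	ctx_table[0] = c;

	ref = ctx_get_by_index(0);
	if (ref) {
		printf("got ctx %u, refcount now %u\n", ref->index,
		       atomic_load(&ref->refcount));
		ctx_put(ref);	/* drop the lookup reference */
	}
	ctx_put(c);		/* drop the initial reference; frees and clears the slot */
	return 0;
}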
linux-master
drivers/infiniband/hw/hfi1/init.c
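/*
 * Illustrative, hedged sketch (not part of the driver): the "halve the buffer
 * size and keep going" fallback used by hfi1_setup_eagerbufs() in init.c
 * above, reduced to plain user-space C with malloc() standing in for
 * dma_alloc_coherent(). All names here (fill_eager_pool, MIN_BUF, MAX_BUFS)
 * are invented for the example, and the real code also re-partitions buffers
 * that were already allocated at the larger size, which is omitted for
 * brevity.
 */
#include <stdio.h>
#include <stdlib.h>

#define MIN_BUF		(64 * 1024UL)	/* stand-in for the rounded-up MTU floor */
#define MAX_BUFS	64

static size_t fill_eager_pool(void *bufs[], size_t target, size_t bufsize)
{
	size_t alloced = 0;
	unsigned int idx = 0;

	while (alloced < target && idx < MAX_BUFS) {
		void *p = malloc(bufsize);

		if (p) {
			bufs[idx++] = p;
			alloced += bufsize;
			continue;
		}
		/* Allocation failed: give up only once the minimum size fails. */
		if (bufsize == MIN_BUF)
			break;
		bufsize /= 2;
		if (bufsize < MIN_BUF)
			bufsize = MIN_BUF;
	}
	printf("allocated %zu of %zu bytes in %u buffers of <= %zu bytes\n",
	       alloced, target, idx, bufsize);
	return alloced;
}

int main(void)
{
	void *bufs[MAX_BUFS] = { 0 };
	size_t got = fill_eager_pool(bufs, 8UL << 20, 1UL << 20);
	unsigned int i;

	for (i = 0; i < MAX_BUFS; i++)
		free(bufs[i]);
	return got ? 0 : 1;
}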
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2023 - Cornelis Networks, Inc. */ #include <linux/types.h> #include "hfi.h" #include "common.h" #include "device.h" #include "pinning.h" #include "mmu_rb.h" #include "user_sdma.h" #include "trace.h" struct sdma_mmu_node { struct mmu_rb_node rb; struct hfi1_user_sdma_pkt_q *pq; struct page **pages; unsigned int npages; }; static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr, unsigned long len); static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode, void *arg2, bool *stop); static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode); static struct mmu_rb_ops sdma_rb_ops = { .filter = sdma_rb_filter, .evict = sdma_rb_evict, .remove = sdma_rb_remove, }; int hfi1_init_system_pinning(struct hfi1_user_sdma_pkt_q *pq) { struct hfi1_devdata *dd = pq->dd; int ret; ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq, &pq->handler); if (ret) dd_dev_err(dd, "[%u:%u] Failed to register system memory DMA support with MMU: %d\n", pq->ctxt, pq->subctxt, ret); return ret; } void hfi1_free_system_pinning(struct hfi1_user_sdma_pkt_q *pq) { if (pq->handler) hfi1_mmu_rb_unregister(pq->handler); } static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) { struct evict_data evict_data; evict_data.cleared = 0; evict_data.target = npages; hfi1_mmu_rb_evict(pq->handler, &evict_data); return evict_data.cleared; } static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, unsigned int start, unsigned int npages) { hfi1_release_user_pages(mm, pages + start, npages, false); kfree(pages); } static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node) { return node->rb.handler->mn.mm; } static void free_system_node(struct sdma_mmu_node *node) { if (node->npages) { unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0, node->npages); atomic_sub(node->npages, &node->pq->n_locked); } kfree(node); } /* * kref_get()'s an additional kref on the returned rb_node to prevent rb_node * from being released until after rb_node is assigned to an SDMA descriptor * (struct sdma_desc) under add_system_iovec_to_sdma_packet(), even if the * virtual address range for rb_node is invalidated between now and then. 
*/ static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler, unsigned long start, unsigned long end) { struct mmu_rb_node *rb_node; unsigned long flags; spin_lock_irqsave(&handler->lock, flags); rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start)); if (!rb_node) { spin_unlock_irqrestore(&handler->lock, flags); return NULL; } /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */ kref_get(&rb_node->refcount); spin_unlock_irqrestore(&handler->lock, flags); return container_of(rb_node, struct sdma_mmu_node, rb); } static int pin_system_pages(struct user_sdma_request *req, uintptr_t start_address, size_t length, struct sdma_mmu_node *node, int npages) { struct hfi1_user_sdma_pkt_q *pq = req->pq; int pinned, cleared; struct page **pages; pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); if (!pages) return -ENOMEM; retry: if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked), npages)) { SDMA_DBG(req, "Evicting: nlocked %u npages %u", atomic_read(&pq->n_locked), npages); cleared = sdma_cache_evict(pq, npages); if (cleared >= npages) goto retry; } SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u", start_address, node->npages, npages); pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0, pages); if (pinned < 0) { kfree(pages); SDMA_DBG(req, "pinned %d", pinned); return pinned; } if (pinned != npages) { unpin_vector_pages(current->mm, pages, node->npages, pinned); SDMA_DBG(req, "npages %u pinned %d", npages, pinned); return -EFAULT; } node->rb.addr = start_address; node->rb.len = length; node->pages = pages; node->npages = npages; atomic_add(pinned, &pq->n_locked); SDMA_DBG(req, "done. pinned %d", pinned); return 0; } /* * kref refcount on *node_p will be 2 on successful addition: one kref from * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being * released until after *node_p is assigned to an SDMA descriptor (struct * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual * address range for *node_p is invalidated between now and then. 
*/ static int add_system_pinning(struct user_sdma_request *req, struct sdma_mmu_node **node_p, unsigned long start, unsigned long len) { struct hfi1_user_sdma_pkt_q *pq = req->pq; struct sdma_mmu_node *node; int ret; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; /* First kref "moves" to mmu_rb_handler */ kref_init(&node->rb.refcount); /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */ kref_get(&node->rb.refcount); node->pq = pq; ret = pin_system_pages(req, start, len, node, PFN_DOWN(len)); if (ret == 0) { ret = hfi1_mmu_rb_insert(pq->handler, &node->rb); if (ret) free_system_node(node); else *node_p = node; return ret; } kfree(node); return ret; } static int get_system_cache_entry(struct user_sdma_request *req, struct sdma_mmu_node **node_p, size_t req_start, size_t req_len) { struct hfi1_user_sdma_pkt_q *pq = req->pq; u64 start = ALIGN_DOWN(req_start, PAGE_SIZE); u64 end = PFN_ALIGN(req_start + req_len); int ret; if ((end - start) == 0) { SDMA_DBG(req, "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx", req_start, req_len, start, end); return -EINVAL; } SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len); while (1) { struct sdma_mmu_node *node = find_system_node(pq->handler, start, end); u64 prepend_len = 0; SDMA_DBG(req, "node %p start %llx end %llu", node, start, end); if (!node) { ret = add_system_pinning(req, node_p, start, end - start); if (ret == -EEXIST) { /* * Another execution context has inserted a * conficting entry first. */ continue; } return ret; } if (node->rb.addr <= start) { /* * This entry covers at least part of the region. If it doesn't extend * to the end, then this will be called again for the next segment. */ *node_p = node; return 0; } SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d", node->rb.addr, kref_read(&node->rb.refcount)); prepend_len = node->rb.addr - start; /* * This node will not be returned, instead a new node * will be. So release the reference. */ kref_put(&node->rb.refcount, hfi1_mmu_rb_release); /* Prepend a node to cover the beginning of the allocation */ ret = add_system_pinning(req, node_p, start, prepend_len); if (ret == -EEXIST) { /* Another execution context has inserted a conficting entry first. */ continue; } return ret; } } static void sdma_mmu_rb_node_get(void *ctx) { struct mmu_rb_node *node = ctx; kref_get(&node->refcount); } static void sdma_mmu_rb_node_put(void *ctx) { struct sdma_mmu_node *node = ctx; kref_put(&node->rb.refcount, hfi1_mmu_rb_release); } static int add_mapping_to_sdma_packet(struct user_sdma_request *req, struct user_sdma_txreq *tx, struct sdma_mmu_node *cache_entry, size_t start, size_t from_this_cache_entry) { struct hfi1_user_sdma_pkt_q *pq = req->pq; unsigned int page_offset; unsigned int from_this_page; size_t page_index; void *ctx; int ret; /* * Because the cache may be more fragmented than the memory that is being accessed, * it's not strictly necessary to have a descriptor per cache entry. 
*/ while (from_this_cache_entry) { page_index = PFN_DOWN(start - cache_entry->rb.addr); if (page_index >= cache_entry->npages) { SDMA_DBG(req, "Request for page_index %zu >= cache_entry->npages %u", page_index, cache_entry->npages); return -EINVAL; } page_offset = start - ALIGN_DOWN(start, PAGE_SIZE); from_this_page = PAGE_SIZE - page_offset; if (from_this_page < from_this_cache_entry) { ctx = NULL; } else { /* * In the case they are equal the next line has no practical effect, * but it's better to do a register to register copy than a conditional * branch. */ from_this_page = from_this_cache_entry; ctx = cache_entry; } ret = sdma_txadd_page(pq->dd, &tx->txreq, cache_entry->pages[page_index], page_offset, from_this_page, ctx, sdma_mmu_rb_node_get, sdma_mmu_rb_node_put); if (ret) { /* * When there's a failure, the entire request is freed by * user_sdma_send_pkts(). */ SDMA_DBG(req, "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u", ret, page_index, page_offset, from_this_page); return ret; } start += from_this_page; from_this_cache_entry -= from_this_page; } return 0; } static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req, struct user_sdma_txreq *tx, struct user_sdma_iovec *iovec, size_t from_this_iovec) { while (from_this_iovec > 0) { struct sdma_mmu_node *cache_entry; size_t from_this_cache_entry; size_t start; int ret; start = (uintptr_t)iovec->iov.iov_base + iovec->offset; ret = get_system_cache_entry(req, &cache_entry, start, from_this_iovec); if (ret) { SDMA_DBG(req, "pin system segment failed %d", ret); return ret; } from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr); if (from_this_cache_entry > from_this_iovec) from_this_cache_entry = from_this_iovec; ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start, from_this_cache_entry); /* * Done adding cache_entry to zero or more sdma_desc. Can * kref_put() the "safety" kref taken under * get_system_cache_entry(). */ kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release); if (ret) { SDMA_DBG(req, "add system segment failed %d", ret); return ret; } iovec->offset += from_this_cache_entry; from_this_iovec -= from_this_cache_entry; } return 0; } /* * Add up to pkt_data_remaining bytes to the txreq, starting at the current * offset in the given iovec entry and continuing until all data has been added * to the iovec or the iovec entry type changes. * * On success, prior to returning, adjust pkt_data_remaining, req->iov_idx, and * the offset value in req->iov[req->iov_idx] to reflect the data that has been * consumed. */ int hfi1_add_pages_to_sdma_packet(struct user_sdma_request *req, struct user_sdma_txreq *tx, struct user_sdma_iovec *iovec, u32 *pkt_data_remaining) { size_t remaining_to_add = *pkt_data_remaining; /* * Walk through iovec entries, ensure the associated pages * are pinned and mapped, add data to the packet until no more * data remains to be added or the iovec entry type changes. */ while (remaining_to_add > 0) { struct user_sdma_iovec *cur_iovec; size_t from_this_iovec; int ret; cur_iovec = iovec; from_this_iovec = iovec->iov.iov_len - iovec->offset; if (from_this_iovec > remaining_to_add) { from_this_iovec = remaining_to_add; } else { /* The current iovec entry will be consumed by this pass. 
*/ req->iov_idx++; iovec++; } ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec, from_this_iovec); if (ret) return ret; remaining_to_add -= from_this_iovec; } *pkt_data_remaining = remaining_to_add; return 0; } static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr, unsigned long len) { return (bool)(node->addr == addr); } /* * Return 1 to remove the node from the rb tree and call the remove op. * * Called with the rb tree lock held. */ static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode, void *evict_arg, bool *stop) { struct sdma_mmu_node *node = container_of(mnode, struct sdma_mmu_node, rb); struct evict_data *evict_data = evict_arg; /* this node will be evicted, add its pages to our count */ evict_data->cleared += node->npages; /* have enough pages been cleared? */ if (evict_data->cleared >= evict_data->target) *stop = true; return 1; /* remove this node */ } static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode) { struct sdma_mmu_node *node = container_of(mnode, struct sdma_mmu_node, rb); free_system_node(node); }
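/*
 * Illustrative, hedged sketch (not part of the driver): a simplified,
 * single-threaded user-space analogue of the pinning admission flow in
 * pin_system_pages() and sdma_cache_evict() above. All names here (pin_cache,
 * cache_pin, cache_evict, PIN_LIMIT, ...) are invented for the example; the
 * driver's real budget check is hfi1_can_pin_pages() and eviction walks an
 * MMU rb tree. The shape is the same: if a new request would exceed the
 * locked-page budget, evict cached pinnings, and loop back only if enough
 * pages were cleared. Where the driver then attempts the pin anyway and lets
 * it fail, the second budget check below stands in for that attempt.
 */
#include <stdio.h>

#define PIN_LIMIT	64		/* stand-in for the mlock budget */
#define MAX_ENTRIES	8

struct pin_cache {
	unsigned int n_locked;			/* pages currently pinned */
	unsigned int entries[MAX_ENTRIES];	/* per-entry page counts */
	unsigned int nr_entries;
};

/* Evict cached entries until at least 'target' pages have been cleared. */
static unsigned int cache_evict(struct pin_cache *c, unsigned int target)
{
	unsigned int cleared = 0;

	while (c->nr_entries && cleared < target) {
		unsigned int victim = c->entries[--c->nr_entries];

		cleared += victim;
		c->n_locked -= victim;
	}
	return cleared;
}

/* Admit a new pinning of 'npages', evicting and retrying if over budget. */
static int cache_pin(struct pin_cache *c, unsigned int npages)
{
retry:
	if (c->n_locked + npages > PIN_LIMIT) {
		/* Only loop back if eviction actually made enough room. */
		if (cache_evict(c, npages) >= npages)
			goto retry;
	}
	if (c->n_locked + npages > PIN_LIMIT)
		return -1;	/* still over budget: reject the request */
	c->n_locked += npages;
	if (c->nr_entries < MAX_ENTRIES)
		c->entries[c->nr_entries++] = npages;
	return 0;
}

int main(void)
{
	struct pin_cache c = { 0 };
	unsigned int sizes[] = { 32, 24, 20, 70 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("pin %u pages -> %s (locked %u)\n", sizes[i],
		       cache_pin(&c, sizes[i]) ? "rejected" : "ok",
		       c.n_locked);
	return 0;
}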
linux-master
drivers/infiniband/hw/hfi1/pin_system.c
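/*
 * Illustrative, hedged sketch (not part of the driver): the page-walk
 * arithmetic used by add_mapping_to_sdma_packet() in pin_system.c above,
 * reduced to plain user-space C. Given a cached pinning that starts at
 * 'base' and a request for 'len' bytes starting at 'start', each iteration
 * computes the page index within the pinning, the offset within that page,
 * and the number of bytes that fit before the next page boundary. The names
 * (walk_mapping, PAGE_SIZE value) are invented for the example.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static void walk_mapping(unsigned long base, unsigned long start,
			 unsigned long len)
{
	while (len) {
		unsigned long page_index = (start - base) / PAGE_SIZE;
		unsigned long page_offset = start % PAGE_SIZE;
		unsigned long chunk = PAGE_SIZE - page_offset;

		if (chunk > len)
			chunk = len;	/* last, partial page */

		printf("page %lu offset %lu bytes %lu\n",
		       page_index, page_offset, chunk);

		start += chunk;
		len -= chunk;
	}
}

int main(void)
{
	/* 10000 bytes starting 100 bytes into the second page of the pinning */
	walk_mapping(0x10000, 0x10000 + PAGE_SIZE + 100, 10000);
	return 0;
}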
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. */ #include <linux/err.h> #include <linux/vmalloc.h> #include <linux/hash.h> #include <linux/module.h> #include <linux/seq_file.h> #include <rdma/rdma_vt.h> #include <rdma/rdmavt_qp.h> #include <rdma/ib_verbs.h> #include "hfi.h" #include "qp.h" #include "trace.h" #include "verbs_txreq.h" unsigned int hfi1_qp_table_size = 256; module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); static void flush_tx_list(struct rvt_qp *qp); static int iowait_sleep( struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *stx, unsigned int seq, bool pkts_sent); static void iowait_wakeup(struct iowait *wait, int reason); static void iowait_sdma_drained(struct iowait *wait); static void qp_pio_drain(struct rvt_qp *qp); const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = { [IB_WR_RDMA_WRITE] = { .length = sizeof(struct ib_rdma_wr), .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC), }, [IB_WR_RDMA_READ] = { .length = sizeof(struct ib_rdma_wr), .qpt_support = BIT(IB_QPT_RC), .flags = RVT_OPERATION_ATOMIC, }, [IB_WR_ATOMIC_CMP_AND_SWP] = { .length = sizeof(struct ib_atomic_wr), .qpt_support = BIT(IB_QPT_RC), .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE, }, [IB_WR_ATOMIC_FETCH_AND_ADD] = { .length = sizeof(struct ib_atomic_wr), .qpt_support = BIT(IB_QPT_RC), .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE, }, [IB_WR_RDMA_WRITE_WITH_IMM] = { .length = sizeof(struct ib_rdma_wr), .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC), }, [IB_WR_SEND] = { .length = sizeof(struct ib_send_wr), .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) | BIT(IB_QPT_UC) | BIT(IB_QPT_RC), }, [IB_WR_SEND_WITH_IMM] = { .length = sizeof(struct ib_send_wr), .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) | BIT(IB_QPT_UC) | BIT(IB_QPT_RC), }, [IB_WR_REG_MR] = { .length = sizeof(struct ib_reg_wr), .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC), .flags = RVT_OPERATION_LOCAL, }, [IB_WR_LOCAL_INV] = { .length = sizeof(struct ib_send_wr), .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC), .flags = RVT_OPERATION_LOCAL, }, [IB_WR_SEND_WITH_INV] = { .length = sizeof(struct ib_send_wr), .qpt_support = BIT(IB_QPT_RC), }, [IB_WR_OPFN] = { .length = sizeof(struct ib_atomic_wr), .qpt_support = BIT(IB_QPT_RC), .flags = RVT_OPERATION_USE_RESERVE, }, [IB_WR_TID_RDMA_WRITE] = { .length = sizeof(struct ib_rdma_wr), .qpt_support = BIT(IB_QPT_RC), .flags = RVT_OPERATION_IGN_RNR_CNT, }, }; static void flush_list_head(struct list_head *l) { while (!list_empty(l)) { struct sdma_txreq *tx; tx = list_first_entry( l, struct sdma_txreq, list); list_del_init(&tx->list); hfi1_put_txreq( container_of(tx, struct verbs_txreq, txreq)); } } static void flush_tx_list(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head); flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head); } static void flush_iowait(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; unsigned long flags; seqlock_t *lock = priv->s_iowait.lock; if (!lock) return; write_seqlock_irqsave(lock, flags); if (!list_empty(&priv->s_iowait.list)) { list_del_init(&priv->s_iowait.list); priv->s_iowait.lock = NULL; rvt_put_qp(qp); } write_sequnlock_irqrestore(lock, flags); } /* * This function is what we would push to the core layer if we wanted to be a * "first class citizen". 
Instead we hide this here and rely on Verbs ULPs * to blindly pass the MTU enum value from the PathRecord to us. */ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu) { /* Constraining 10KB packets to 8KB packets */ if (mtu == (enum ib_mtu)OPA_MTU_10240) mtu = (enum ib_mtu)OPA_MTU_8192; return opa_mtu_enum_to_int((enum opa_mtu)mtu); } int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct ib_qp *ibqp = &qp->ibqp; struct hfi1_ibdev *dev = to_idev(ibqp->device); struct hfi1_devdata *dd = dd_from_dev(dev); u8 sc; if (attr_mask & IB_QP_AV) { sc = ah_to_sc(ibqp->device, &attr->ah_attr); if (sc == 0xf) return -EINVAL; if (!qp_to_sdma_engine(qp, sc) && dd->flags & HFI1_HAS_SEND_DMA) return -EINVAL; if (!qp_to_send_context(qp, sc)) return -EINVAL; } if (attr_mask & IB_QP_ALT_PATH) { sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr); if (sc == 0xf) return -EINVAL; if (!qp_to_sdma_engine(qp, sc) && dd->flags & HFI1_HAS_SEND_DMA) return -EINVAL; if (!qp_to_send_context(qp, sc)) return -EINVAL; } return 0; } /* * qp_set_16b - Set the hdr_type based on whether the slid or the * dlid in the connection is extended. Only applicable for RC and UC * QPs. UD QPs determine this on the fly from the ah in the wqe */ static inline void qp_set_16b(struct rvt_qp *qp) { struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; struct hfi1_qp_priv *priv = qp->priv; /* Update ah_attr to account for extended LIDs */ hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr); /* Create 32 bit LIDs */ hfi1_make_opa_lid(&qp->remote_ah_attr); if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) return; ibp = to_iport(qp->ibqp.device, qp->port_num); ppd = ppd_from_ibp(ibp); priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr); } void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct ib_qp *ibqp = &qp->ibqp; struct hfi1_qp_priv *priv = qp->priv; if (attr_mask & IB_QP_AV) { priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc); qp_set_16b(qp); } if (attr_mask & IB_QP_PATH_MIG_STATE && attr->path_mig_state == IB_MIG_MIGRATED && qp->s_mig_state == IB_MIG_ARMED) { qp->s_flags |= HFI1_S_AHG_CLEAR; priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc); qp_set_16b(qp); } opfn_qp_init(qp, attr, attr_mask); } /** * hfi1_setup_wqe - set up the wqe * @qp: The qp * @wqe: The built wqe * @call_send: Determine if the send should be posted or scheduled. * * Perform setup of the wqe. This is called * prior to inserting the wqe into the ring but after * the wqe has been setup by RDMAVT. This function * allows the driver the opportunity to perform * validation and additional setup of the wqe. 
* * Returns 0 on success, -EINVAL on failure * */ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct rvt_ah *ah; struct hfi1_pportdata *ppd; struct hfi1_devdata *dd; switch (qp->ibqp.qp_type) { case IB_QPT_RC: hfi1_setup_tid_rdma_wqe(qp, wqe); fallthrough; case IB_QPT_UC: if (wqe->length > 0x80000000U) return -EINVAL; if (wqe->length > qp->pmtu) *call_send = false; break; case IB_QPT_SMI: /* * SM packets should exclusively use VL15 and their SL is * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah * is created, SL is 0 in most cases and as a result some * fields (vl and pmtu) in ah may not be set correctly, * depending on the SL2SC and SC2VL tables at the time. */ ppd = ppd_from_ibp(ibp); dd = dd_from_ppd(ppd); if (wqe->length > dd->vld[15].mtu) return -EINVAL; break; case IB_QPT_GSI: case IB_QPT_UD: ah = rvt_get_swqe_ah(wqe); if (wqe->length > (1 << ah->log_pmtu)) return -EINVAL; if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf) return -EINVAL; break; default: break; } /* * System latency between send and schedule is large enough that * forcing call_send to true for piothreshold packets is necessary. */ if (wqe->length <= piothreshold) *call_send = true; return 0; } /** * _hfi1_schedule_send - schedule progress * @qp: the QP * * This schedules qp progress w/o regard to the s_flags. * * It is only used in the post send, which doesn't hold * the s_lock. */ bool _hfi1_schedule_send(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_devdata *dd = ppd->dd; if (dd->flags & HFI1_SHUTDOWN) return true; return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? priv->s_sde->cpu : cpumask_first(cpumask_of_node(dd->node))); } static void qp_pio_drain(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; if (!priv->s_sendcontext) return; while (iowait_pio_pending(&priv->s_iowait)) { write_seqlock_irq(&priv->s_sendcontext->waitlock); hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1); write_sequnlock_irq(&priv->s_sendcontext->waitlock); iowait_pio_drain(&priv->s_iowait); write_seqlock_irq(&priv->s_sendcontext->waitlock); hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0); write_sequnlock_irq(&priv->s_sendcontext->waitlock); } } /** * hfi1_schedule_send - schedule progress * @qp: the QP * * This schedules qp progress and caller should hold * the s_lock. * @return true if the first leg is scheduled; * false if the first leg is not scheduled. 
*/ bool hfi1_schedule_send(struct rvt_qp *qp) { lockdep_assert_held(&qp->s_lock); if (hfi1_send_ok(qp)) { _hfi1_schedule_send(qp); return true; } if (qp->s_flags & HFI1_S_ANY_WAIT_IO) iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait, IOWAIT_PENDING_IB); return false; } static void hfi1_qp_schedule(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; bool ret; if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) { ret = hfi1_schedule_send(qp); if (ret) iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB); } if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_TID)) { ret = hfi1_schedule_tid_send(qp); if (ret) iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID); } } void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag) { unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); if (qp->s_flags & flag) { qp->s_flags &= ~flag; trace_hfi1_qpwakeup(qp, flag); hfi1_qp_schedule(qp); } spin_unlock_irqrestore(&qp->s_lock, flags); /* Notify hfi1_destroy_qp() if it is waiting. */ rvt_put_qp(qp); } void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait) { struct hfi1_qp_priv *priv = qp->priv; if (iowait_set_work_flag(wait) == IOWAIT_IB_SE) { qp->s_flags &= ~RVT_S_BUSY; /* * If we are sending a first-leg packet from the second leg, * we need to clear the busy flag from priv->s_flags to * avoid a race condition when the qp wakes up before * the call to hfi1_verbs_send() returns to the second * leg. In that case, the second leg will terminate without * being re-scheduled, resulting in failure to send TID RDMA * WRITE DATA and TID RDMA ACK packets. */ if (priv->s_flags & HFI1_S_TID_BUSY_SET) { priv->s_flags &= ~(HFI1_S_TID_BUSY_SET | RVT_S_BUSY); iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID); } } else { priv->s_flags &= ~RVT_S_BUSY; } } static int iowait_sleep( struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *stx, uint seq, bool pkts_sent) { struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq); struct rvt_qp *qp; struct hfi1_qp_priv *priv; unsigned long flags; int ret = 0; qp = tx->qp; priv = qp->priv; spin_lock_irqsave(&qp->s_lock, flags); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { /* * If we couldn't queue the DMA request, save the info * and try again later rather than destroying the * buffer and undoing the side effects of the copy. */ /* Make a common routine? 
*/ list_add_tail(&stx->list, &wait->tx_head); write_seqlock(&sde->waitlock); if (sdma_progress(sde, seq, stx)) goto eagain; if (list_empty(&priv->s_iowait.list)) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); ibp->rvp.n_dmawait++; qp->s_flags |= RVT_S_WAIT_DMA_DESC; iowait_get_priority(&priv->s_iowait); iowait_queue(pkts_sent, &priv->s_iowait, &sde->dmawait); priv->s_iowait.lock = &sde->waitlock; trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC); rvt_get_qp(qp); } write_sequnlock(&sde->waitlock); hfi1_qp_unbusy(qp, wait); spin_unlock_irqrestore(&qp->s_lock, flags); ret = -EBUSY; } else { spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_put_txreq(tx); } return ret; eagain: write_sequnlock(&sde->waitlock); spin_unlock_irqrestore(&qp->s_lock, flags); list_del_init(&stx->list); return -EAGAIN; } static void iowait_wakeup(struct iowait *wait, int reason) { struct rvt_qp *qp = iowait_to_qp(wait); WARN_ON(reason != SDMA_AVAIL_REASON); hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC); } static void iowait_sdma_drained(struct iowait *wait) { struct rvt_qp *qp = iowait_to_qp(wait); unsigned long flags; /* * This happens when the send engine notes * a QP in the error state and cannot * do the flush work until that QP's * sdma work has finished. */ spin_lock_irqsave(&qp->s_lock, flags); if (qp->s_flags & RVT_S_WAIT_DMA) { qp->s_flags &= ~RVT_S_WAIT_DMA; hfi1_schedule_send(qp); } spin_unlock_irqrestore(&qp->s_lock, flags); } static void hfi1_init_priority(struct iowait *w) { struct rvt_qp *qp = iowait_to_qp(w); struct hfi1_qp_priv *priv = qp->priv; if (qp->s_flags & RVT_S_ACK_PENDING) w->priority++; if (priv->s_flags & RVT_S_ACK_PENDING) w->priority++; } /** * qp_to_sdma_engine - map a qp to a send engine * @qp: the QP * @sc5: the 5 bit sc * * Return: * A send engine for the qp or NULL for SMI type qp. */ struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct sdma_engine *sde; if (!(dd->flags & HFI1_HAS_SEND_DMA)) return NULL; switch (qp->ibqp.qp_type) { case IB_QPT_SMI: return NULL; default: break; } sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5); return sde; } /** * qp_to_send_context - map a qp to a send context * @qp: the QP * @sc5: the 5 bit sc * * Return: * A send context for the qp */ struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); switch (qp->ibqp.qp_type) { case IB_QPT_SMI: /* SMA packets to VL15 */ return dd->vld[15].sc; default: break; } return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5); } static const char * const qp_type_str[] = { "SMI", "GSI", "RC", "UC", "UD", }; static int qp_idle(struct rvt_qp *qp) { return qp->s_last == qp->s_acked && qp->s_acked == qp->s_cur && qp->s_cur == qp->s_tail && qp->s_tail == qp->s_head; } /** * qp_iter_print - print the qp information to seq_file * @s: the seq_file to emit the qp information on * @iter: the iterator for the qp hash list */ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter) { struct rvt_swqe *wqe; struct rvt_qp *qp = iter->qp; struct hfi1_qp_priv *priv = qp->priv; struct sdma_engine *sde; struct send_context *send_context; struct rvt_ack_entry *e = NULL; struct rvt_srq *srq = qp->ibqp.srq ? 
ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL; sde = qp_to_sdma_engine(qp, priv->s_sc); wqe = rvt_get_swqe_ptr(qp, qp->s_last); send_context = qp_to_send_context(qp, priv->s_sc); if (qp->s_ack_queue) e = &qp->s_ack_queue[qp->s_tail_ack_queue]; seq_printf(s, "N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, atomic_read(&qp->refcount), qp_type_str[qp->ibqp.qp_type], qp->state, wqe ? wqe->wr.opcode : 0, qp->s_flags, iowait_sdma_pending(&priv->s_iowait), iowait_pio_pending(&priv->s_iowait), !list_empty(&priv->s_iowait.list), qp->timeout, wqe ? wqe->ssn : 0, qp->s_lsn, qp->s_last_psn, qp->s_psn, qp->s_next_psn, qp->s_sending_psn, qp->s_sending_hpsn, qp->r_psn, qp->s_last, qp->s_acked, qp->s_cur, qp->s_tail, qp->s_head, qp->s_size, qp->s_avail, /* ack_queue ring pointers, size */ qp->s_tail_ack_queue, qp->r_head_ack_queue, rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi), /* remote QP info */ qp->remote_qpn, rdma_ah_get_dlid(&qp->remote_ah_attr), rdma_ah_get_sl(&qp->remote_ah_attr), qp->pmtu, qp->s_retry, qp->s_retry_cnt, qp->s_rnr_retry_cnt, qp->s_rnr_retry, sde, sde ? sde->this_idx : 0, send_context, send_context ? send_context->sw_index : 0, ib_cq_head(qp->ibqp.send_cq), ib_cq_tail(qp->ibqp.send_cq), qp->pid, qp->s_state, qp->s_ack_state, /* ack queue information */ e ? e->opcode : 0, e ? e->psn : 0, e ? e->lpsn : 0, qp->r_min_rnr_timer, srq ? "SRQ" : "RQ", srq ? srq->rq.size : qp->r_rq.size ); } void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp) { struct hfi1_qp_priv *priv; priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node); if (!priv) return ERR_PTR(-ENOMEM); priv->owner = qp; priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL, rdi->dparms.node); if (!priv->s_ahg) { kfree(priv); return ERR_PTR(-ENOMEM); } iowait_init( &priv->s_iowait, 1, _hfi1_do_send, _hfi1_do_tid_send, iowait_sleep, iowait_wakeup, iowait_sdma_drained, hfi1_init_priority); /* Init to a value to start the running average correctly */ priv->s_running_pkt_size = piothreshold / 2; return priv; } void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; hfi1_qp_priv_tid_free(rdi, qp); kfree(priv->s_ahg); kfree(priv); } unsigned free_all_qps(struct rvt_dev_info *rdi) { struct hfi1_ibdev *verbs_dev = container_of(rdi, struct hfi1_ibdev, rdi); struct hfi1_devdata *dd = container_of(verbs_dev, struct hfi1_devdata, verbs_dev); int n; unsigned qp_inuse = 0; for (n = 0; n < dd->num_pports; n++) { struct hfi1_ibport *ibp = &dd->pport[n].ibport_data; rcu_read_lock(); if (rcu_dereference(ibp->rvp.qp[0])) qp_inuse++; if (rcu_dereference(ibp->rvp.qp[1])) qp_inuse++; rcu_read_unlock(); } return qp_inuse; } void flush_qp_waiters(struct rvt_qp *qp) { lockdep_assert_held(&qp->s_lock); flush_iowait(qp); hfi1_tid_rdma_flush_wait(qp); } void stop_send_queue(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; iowait_cancel_work(&priv->s_iowait); if (cancel_work_sync(&priv->tid_rdma.trigger_work)) rvt_put_qp(qp); } void quiesce_qp(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; hfi1_del_tid_reap_timer(qp); hfi1_del_tid_retry_timer(qp); iowait_sdma_drain(&priv->s_iowait); qp_pio_drain(qp); flush_tx_list(qp); } void notify_qp_reset(struct rvt_qp *qp) { hfi1_qp_kern_exp_rcv_clear_all(qp); qp->r_adefered = 0; clear_ahg(qp); /* Clear any 
OPFN state */ if (qp->ibqp.qp_type == IB_QPT_RC) opfn_conn_error(qp); } /* * Switch to alternate path. * The QP s_lock should be held and interrupts disabled. */ void hfi1_migrate_qp(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct ib_event ev; qp->s_mig_state = IB_MIG_MIGRATED; qp->remote_ah_attr = qp->alt_ah_attr; qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr); qp->s_pkey_index = qp->s_alt_pkey_index; qp->s_flags |= HFI1_S_AHG_CLEAR; priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); qp_set_16b(qp); ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_PATH_MIG; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } int mtu_to_path_mtu(u32 mtu) { return mtu_to_enum(mtu, OPA_MTU_8192); } u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu) { u32 mtu; struct hfi1_ibdev *verbs_dev = container_of(rdi, struct hfi1_ibdev, rdi); struct hfi1_devdata *dd = container_of(verbs_dev, struct hfi1_devdata, verbs_dev); struct hfi1_ibport *ibp; u8 sc, vl; ibp = &dd->pport[qp->port_num - 1].ibport_data; sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; vl = sc_to_vlt(dd, sc); mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu); if (vl < PER_VL_SEND_CONTEXTS) mtu = min_t(u32, mtu, dd->vld[vl].mtu); return mtu; } int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp, struct ib_qp_attr *attr) { int mtu, pidx = qp->port_num - 1; struct hfi1_ibdev *verbs_dev = container_of(rdi, struct hfi1_ibdev, rdi); struct hfi1_devdata *dd = container_of(verbs_dev, struct hfi1_devdata, verbs_dev); mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu); if (mtu == -1) return -1; /* values less than 0 are error */ if (mtu > dd->pport[pidx].ibmtu) return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048); else return attr->path_mtu; } void notify_error_qp(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; seqlock_t *lock = priv->s_iowait.lock; if (lock) { write_seqlock(lock); if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) { qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB); iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID); list_del_init(&priv->s_iowait.list); priv->s_iowait.lock = NULL; rvt_put_qp(qp); } write_sequnlock(lock); } if (!(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) { qp->s_hdrwords = 0; if (qp->s_rdma_mr) { rvt_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } flush_tx_list(qp); } } /** * hfi1_qp_iter_cb - callback for iterator * @qp: the qp * @v: the sl in low bits of v * * This is called from the iterator callback to work * on an individual qp. 
*/ static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v) { int lastwqe; struct ib_event ev; struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u8 sl = (u8)v; if (qp->port_num != ppd->port || (qp->ibqp.qp_type != IB_QPT_UC && qp->ibqp.qp_type != IB_QPT_RC) || rdma_ah_get_sl(&qp->remote_ah_attr) != sl || !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK)) return; spin_lock_irq(&qp->r_lock); spin_lock(&qp->s_hlock); spin_lock(&qp->s_lock); lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); spin_unlock(&qp->s_lock); spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); if (lastwqe) { ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_QP_LAST_WQE_REACHED; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } } /** * hfi1_error_port_qps - put a port's RC/UC qps into error state * @ibp: the ibport. * @sl: the service level. * * This function places all RC/UC qps with a given service level into error * state. It is generally called to force upper lay apps to abandon stale qps * after an sl->sc mapping change. */ void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_ibdev *dev = &ppd->dd->verbs_dev; rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb); }
linux-master
drivers/infiniband/hw/hfi1/qp.c
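The qp.c record above caps the OPA 10KB MTU at 8KB in verbs_mtu_enum_to_int() and then, in mtu_from_qp(), limits the result to the per-VL MTU. The short userspace sketch below reproduces that selection; the enum values, the byte table, and the vl_mtu figure are illustrative assumptions rather than the driver's definitions.

#include <stdio.h>

enum opa_mtu {
	OPA_MTU_2048  = 4,	/* assumed: follows the IB MTU enum */
	OPA_MTU_4096  = 5,
	OPA_MTU_8192  = 6,
	OPA_MTU_10240 = 7,
};

static unsigned int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return 0;
	}
}

/* Mirrors verbs_mtu_enum_to_int(): 10KB packets are constrained to 8KB. */
static unsigned int verbs_mtu_to_bytes(enum opa_mtu mtu)
{
	if (mtu == OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	return opa_mtu_enum_to_int(mtu);
}

int main(void)
{
	unsigned int vl_mtu = 4096;	/* stands in for dd->vld[vl].mtu */
	unsigned int mtu = verbs_mtu_to_bytes(OPA_MTU_10240);

	if (mtu > vl_mtu)		/* mtu_from_qp(): take the smaller of the two */
		mtu = vl_mtu;
	printf("effective path MTU: %u bytes\n", mtu);
	return 0;
}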
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2017 Intel Corporation. */ #include "exp_rcv.h" #include "trace.h" /** * hfi1_exp_tid_set_init - initialize exp_tid_set * @set: the set */ static void hfi1_exp_tid_set_init(struct exp_tid_set *set) { INIT_LIST_HEAD(&set->list); set->count = 0; } /** * hfi1_exp_tid_group_init - initialize rcd expected receive * @rcd: the rcd */ void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd) { hfi1_exp_tid_set_init(&rcd->tid_group_list); hfi1_exp_tid_set_init(&rcd->tid_used_list); hfi1_exp_tid_set_init(&rcd->tid_full_list); } /** * hfi1_alloc_ctxt_rcv_groups - initialize expected receive groups * @rcd: the context to add the groupings to */ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd) { struct hfi1_devdata *dd = rcd->dd; u32 tidbase; struct tid_group *grp; int i; u32 ngroups; ngroups = rcd->expected_count / dd->rcv_entries.group_size; rcd->groups = kcalloc_node(ngroups, sizeof(*rcd->groups), GFP_KERNEL, rcd->numa_id); if (!rcd->groups) return -ENOMEM; tidbase = rcd->expected_base; for (i = 0; i < ngroups; i++) { grp = &rcd->groups[i]; grp->size = dd->rcv_entries.group_size; grp->base = tidbase; tid_group_add_tail(grp, &rcd->tid_group_list); tidbase += dd->rcv_entries.group_size; } return 0; } /** * hfi1_free_ctxt_rcv_groups - free expected receive groups * @rcd: the context to free * * The routine dismantles the expect receive linked * list and clears any tids associated with the receive * context. * * This should only be called for kernel contexts and the * a base user context. */ void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd) { kfree(rcd->groups); rcd->groups = NULL; hfi1_exp_tid_group_init(rcd); hfi1_clear_tids(rcd); }
linux-master
drivers/infiniband/hw/hfi1/exp_rcv.c
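hfi1_alloc_ctxt_rcv_groups() in the exp_rcv.c record carves a context's expected-receive entries into fixed-size groups, each recording its base index, before linking them onto tid_group_list. Below is a minimal userspace sketch of that carving; the counts and base value are made-up examples and the struct is a stand-in for the driver's tid_group.

#include <stdio.h>
#include <stdlib.h>

struct tid_group {
	unsigned int base;	/* first expected-receive entry in the group */
	unsigned int size;	/* entries per group */
};

int main(void)
{
	unsigned int expected_count = 64;	/* assumed rcd->expected_count */
	unsigned int group_size = 8;		/* assumed dd->rcv_entries.group_size */
	unsigned int tidbase = 1024;		/* assumed rcd->expected_base */
	unsigned int ngroups = expected_count / group_size;
	unsigned int i;
	struct tid_group *groups;

	groups = calloc(ngroups, sizeof(*groups));
	if (!groups)
		return 1;
	for (i = 0; i < ngroups; i++) {
		groups[i].base = tidbase;
		groups[i].size = group_size;
		tidbase += group_size;		/* next group starts right after */
	}
	printf("built %u groups, last base %u\n", ngroups, groups[ngroups - 1].base);
	free(groups);
	return 0;
}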
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2017 - 2020 Intel Corporation. */ /* * This file contains HFI1 support for VNIC functionality */ #include <linux/io.h> #include <linux/if_vlan.h> #include "vnic.h" #include "netdev.h" #define HFI_TX_TIMEOUT_MS 1000 #define HFI1_VNIC_RCV_Q_SIZE 1024 #define HFI1_VNIC_UP 0 static DEFINE_SPINLOCK(vport_cntr_lock); #define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \ u64 *src64, *dst64; \ for (src64 = &qstats->x_grp.unicast, \ dst64 = &stats->x_grp.unicast; \ dst64 <= &stats->x_grp.s_1519_max;) { \ *dst64++ += *src64++; \ } \ } while (0) #define VNIC_MASK (0xFF) #define VNIC_ID(val) ((1ull << 24) | ((val) & VNIC_MASK)) /* hfi1_vnic_update_stats - update statistics */ static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo, struct opa_vnic_stats *stats) { struct net_device *netdev = vinfo->netdev; u8 i; /* add tx counters on different queues */ for (i = 0; i < vinfo->num_tx_q; i++) { struct opa_vnic_stats *qstats = &vinfo->stats[i]; struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats; stats->netstats.tx_fifo_errors += qnstats->tx_fifo_errors; stats->netstats.tx_carrier_errors += qnstats->tx_carrier_errors; stats->tx_drop_state += qstats->tx_drop_state; stats->tx_dlid_zero += qstats->tx_dlid_zero; SUM_GRP_COUNTERS(stats, qstats, tx_grp); stats->netstats.tx_packets += qnstats->tx_packets; stats->netstats.tx_bytes += qnstats->tx_bytes; } /* add rx counters on different queues */ for (i = 0; i < vinfo->num_rx_q; i++) { struct opa_vnic_stats *qstats = &vinfo->stats[i]; struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats; stats->netstats.rx_fifo_errors += qnstats->rx_fifo_errors; stats->netstats.rx_nohandler += qnstats->rx_nohandler; stats->rx_drop_state += qstats->rx_drop_state; stats->rx_oversize += qstats->rx_oversize; stats->rx_runt += qstats->rx_runt; SUM_GRP_COUNTERS(stats, qstats, rx_grp); stats->netstats.rx_packets += qnstats->rx_packets; stats->netstats.rx_bytes += qnstats->rx_bytes; } stats->netstats.tx_errors = stats->netstats.tx_fifo_errors + stats->netstats.tx_carrier_errors + stats->tx_drop_state + stats->tx_dlid_zero; stats->netstats.tx_dropped = stats->netstats.tx_errors; stats->netstats.rx_errors = stats->netstats.rx_fifo_errors + stats->netstats.rx_nohandler + stats->rx_drop_state + stats->rx_oversize + stats->rx_runt; stats->netstats.rx_dropped = stats->netstats.rx_errors; netdev->stats.tx_packets = stats->netstats.tx_packets; netdev->stats.tx_bytes = stats->netstats.tx_bytes; netdev->stats.tx_fifo_errors = stats->netstats.tx_fifo_errors; netdev->stats.tx_carrier_errors = stats->netstats.tx_carrier_errors; netdev->stats.tx_errors = stats->netstats.tx_errors; netdev->stats.tx_dropped = stats->netstats.tx_dropped; netdev->stats.rx_packets = stats->netstats.rx_packets; netdev->stats.rx_bytes = stats->netstats.rx_bytes; netdev->stats.rx_fifo_errors = stats->netstats.rx_fifo_errors; netdev->stats.multicast = stats->rx_grp.mcastbcast; netdev->stats.rx_length_errors = stats->rx_oversize + stats->rx_runt; netdev->stats.rx_errors = stats->netstats.rx_errors; netdev->stats.rx_dropped = stats->netstats.rx_dropped; } /* update_len_counters - update pkt's len histogram counters */ static inline void update_len_counters(struct opa_vnic_grp_stats *grp, int len) { /* account for 4 byte FCS */ if (len >= 1515) grp->s_1519_max++; else if (len >= 1020) grp->s_1024_1518++; else if (len >= 508) grp->s_512_1023++; else if (len >= 252) grp->s_256_511++; else if (len >= 124) grp->s_128_255++; else if 
(len >= 61) grp->s_65_127++; else grp->s_64++; } /* hfi1_vnic_update_tx_counters - update transmit counters */ static void hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo, u8 q_idx, struct sk_buff *skb, int err) { struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb); struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; struct opa_vnic_grp_stats *tx_grp = &stats->tx_grp; u16 vlan_tci; stats->netstats.tx_packets++; stats->netstats.tx_bytes += skb->len + ETH_FCS_LEN; update_len_counters(tx_grp, skb->len); /* rest of the counts are for good packets only */ if (unlikely(err)) return; if (is_multicast_ether_addr(mac_hdr->h_dest)) tx_grp->mcastbcast++; else tx_grp->unicast++; if (!__vlan_get_tag(skb, &vlan_tci)) tx_grp->vlan++; else tx_grp->untagged++; } /* hfi1_vnic_update_rx_counters - update receive counters */ static void hfi1_vnic_update_rx_counters(struct hfi1_vnic_vport_info *vinfo, u8 q_idx, struct sk_buff *skb, int err) { struct ethhdr *mac_hdr = (struct ethhdr *)skb->data; struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; struct opa_vnic_grp_stats *rx_grp = &stats->rx_grp; u16 vlan_tci; stats->netstats.rx_packets++; stats->netstats.rx_bytes += skb->len + ETH_FCS_LEN; update_len_counters(rx_grp, skb->len); /* rest of the counts are for good packets only */ if (unlikely(err)) return; if (is_multicast_ether_addr(mac_hdr->h_dest)) rx_grp->mcastbcast++; else rx_grp->unicast++; if (!__vlan_get_tag(skb, &vlan_tci)) rx_grp->vlan++; else rx_grp->untagged++; } /* This function is overloaded for opa_vnic specific implementation */ static void hfi1_vnic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct opa_vnic_stats *vstats = (struct opa_vnic_stats *)stats; struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); hfi1_vnic_update_stats(vinfo, vstats); } static u64 create_bypass_pbc(u32 vl, u32 dw_len) { u64 pbc; pbc = ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT) | PBC_INSERT_BYPASS_ICRC | PBC_CREDIT_RETURN | PBC_PACKET_BYPASS | ((vl & PBC_VL_MASK) << PBC_VL_SHIFT) | (dw_len & PBC_LENGTH_DWS_MASK) << PBC_LENGTH_DWS_SHIFT; return pbc; } /* hfi1_vnic_maybe_stop_tx - stop tx queue if required */ static void hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo, u8 q_idx) { netif_stop_subqueue(vinfo->netdev, q_idx); if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx)) return; netif_start_subqueue(vinfo->netdev, q_idx); } static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); u8 pad_len, q_idx = skb->queue_mapping; struct hfi1_devdata *dd = vinfo->dd; struct opa_vnic_skb_mdata *mdata; u32 pkt_len, total_len; int err = -EINVAL; u64 pbc; v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len); if (unlikely(!netif_oper_up(netdev))) { vinfo->stats[q_idx].tx_drop_state++; goto tx_finish; } /* take out meta data */ mdata = (struct opa_vnic_skb_mdata *)skb->data; skb_pull(skb, sizeof(*mdata)); if (unlikely(mdata->flags & OPA_VNIC_SKB_MDATA_ENCAP_ERR)) { vinfo->stats[q_idx].tx_dlid_zero++; goto tx_finish; } /* add tail padding (for 8 bytes size alignment) and icrc */ pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7; pad_len += OPA_VNIC_ICRC_TAIL_LEN; /* * pkt_len is how much data we have to write, includes header and data. * total_len is length of the packet in Dwords plus the PBC should not * include the CRC. 
*/ pkt_len = (skb->len + pad_len) >> 2; total_len = pkt_len + 2; /* PBC + packet */ pbc = create_bypass_pbc(mdata->vl, total_len); skb_get(skb); v_dbg("pbc 0x%016llX len %d pad_len %d\n", pbc, skb->len, pad_len); err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len); if (unlikely(err)) { if (err == -ENOMEM) vinfo->stats[q_idx].netstats.tx_fifo_errors++; else if (err != -EBUSY) vinfo->stats[q_idx].netstats.tx_carrier_errors++; } /* remove the header before updating tx counters */ skb_pull(skb, OPA_VNIC_HDR_LEN); if (unlikely(err == -EBUSY)) { hfi1_vnic_maybe_stop_tx(vinfo, q_idx); dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } tx_finish: /* update tx counters */ hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static u16 hfi1_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, struct net_device *sb_dev) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); struct opa_vnic_skb_mdata *mdata; struct sdma_engine *sde; mdata = (struct opa_vnic_skb_mdata *)skb->data; sde = sdma_select_engine_vl(vinfo->dd, mdata->entropy, mdata->vl); return sde->this_idx; } /* hfi1_vnic_decap_skb - strip OPA header from the skb (ethernet) packet */ static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq, struct sk_buff *skb) { struct hfi1_vnic_vport_info *vinfo = rxq->vinfo; int max_len = vinfo->netdev->mtu + VLAN_ETH_HLEN; int rc = -EFAULT; skb_pull(skb, OPA_VNIC_HDR_LEN); /* Validate Packet length */ if (unlikely(skb->len > max_len)) vinfo->stats[rxq->idx].rx_oversize++; else if (unlikely(skb->len < ETH_ZLEN)) vinfo->stats[rxq->idx].rx_runt++; else rc = 0; return rc; } static struct hfi1_vnic_vport_info *get_vnic_port(struct hfi1_devdata *dd, int vesw_id) { int vnic_id = VNIC_ID(vesw_id); return hfi1_netdev_get_data(dd, vnic_id); } static struct hfi1_vnic_vport_info *get_first_vnic_port(struct hfi1_devdata *dd) { struct hfi1_vnic_vport_info *vinfo; int next_id = VNIC_ID(0); vinfo = hfi1_netdev_get_first_data(dd, &next_id); if (next_id > VNIC_ID(VNIC_MASK)) return NULL; return vinfo; } void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet) { struct hfi1_devdata *dd = packet->rcd->dd; struct hfi1_vnic_vport_info *vinfo = NULL; struct hfi1_vnic_rx_queue *rxq; struct sk_buff *skb; int l4_type, vesw_id = -1, rc; u8 q_idx; unsigned char *pad_info; l4_type = hfi1_16B_get_l4(packet->ebuf); if (likely(l4_type == OPA_16B_L4_ETHR)) { vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf); vinfo = get_vnic_port(dd, vesw_id); /* * In case of invalid vesw id, count the error on * the first available vport. 
*/ if (unlikely(!vinfo)) { struct hfi1_vnic_vport_info *vinfo_tmp; vinfo_tmp = get_first_vnic_port(dd); if (vinfo_tmp) { spin_lock(&vport_cntr_lock); vinfo_tmp->stats[0].netstats.rx_nohandler++; spin_unlock(&vport_cntr_lock); } } } if (unlikely(!vinfo)) { dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n", l4_type, vesw_id, packet->rcd->ctxt); return; } q_idx = packet->rcd->vnic_q_idx; rxq = &vinfo->rxq[q_idx]; if (unlikely(!netif_oper_up(vinfo->netdev))) { vinfo->stats[q_idx].rx_drop_state++; return; } skb = netdev_alloc_skb(vinfo->netdev, packet->tlen); if (unlikely(!skb)) { vinfo->stats[q_idx].netstats.rx_fifo_errors++; return; } memcpy(skb->data, packet->ebuf, packet->tlen); skb_put(skb, packet->tlen); pad_info = skb->data + skb->len - 1; skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN - ((*pad_info) & 0x7))); rc = hfi1_vnic_decap_skb(rxq, skb); /* update rx counters */ hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc); if (unlikely(rc)) { dev_kfree_skb_any(skb); return; } skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, rxq->netdev); napi_gro_receive(&rxq->napi, skb); } static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo) { struct hfi1_devdata *dd = vinfo->dd; struct net_device *netdev = vinfo->netdev; int rc; /* ensure virtual eth switch id is valid */ if (!vinfo->vesw_id) return -EINVAL; rc = hfi1_netdev_add_data(dd, VNIC_ID(vinfo->vesw_id), vinfo); if (rc < 0) return rc; rc = hfi1_netdev_rx_init(dd); if (rc) goto err_remove; netif_carrier_on(netdev); netif_tx_start_all_queues(netdev); set_bit(HFI1_VNIC_UP, &vinfo->flags); return 0; err_remove: hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id)); return rc; } static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo) { struct hfi1_devdata *dd = vinfo->dd; clear_bit(HFI1_VNIC_UP, &vinfo->flags); netif_carrier_off(vinfo->netdev); netif_tx_disable(vinfo->netdev); hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id)); hfi1_netdev_rx_destroy(dd); } static int hfi1_netdev_open(struct net_device *netdev) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); int rc; mutex_lock(&vinfo->lock); rc = hfi1_vnic_up(vinfo); mutex_unlock(&vinfo->lock); return rc; } static int hfi1_netdev_close(struct net_device *netdev) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); mutex_lock(&vinfo->lock); if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) hfi1_vnic_down(vinfo); mutex_unlock(&vinfo->lock); return 0; } static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo) { struct hfi1_devdata *dd = vinfo->dd; int rc = 0; mutex_lock(&hfi1_mutex); if (!dd->vnic_num_vports) { rc = hfi1_vnic_txreq_init(dd); if (rc) goto txreq_fail; } rc = hfi1_netdev_rx_init(dd); if (rc) { dd_dev_err(dd, "Unable to initialize netdev contexts\n"); goto alloc_fail; } hfi1_init_vnic_rsm(dd); dd->vnic_num_vports++; hfi1_vnic_sdma_init(vinfo); alloc_fail: if (!dd->vnic_num_vports) hfi1_vnic_txreq_deinit(dd); txreq_fail: mutex_unlock(&hfi1_mutex); return rc; } static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo) { struct hfi1_devdata *dd = vinfo->dd; mutex_lock(&hfi1_mutex); if (--dd->vnic_num_vports == 0) { hfi1_deinit_vnic_rsm(dd); hfi1_vnic_txreq_deinit(dd); } mutex_unlock(&hfi1_mutex); hfi1_netdev_rx_destroy(dd); } static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); bool reopen = false; /* * If vesw_id is being changed, and if the vnic port is up, * reset the vnic port to ensure new vesw_id gets picked up */ if (id != 
vinfo->vesw_id) { mutex_lock(&vinfo->lock); if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) { hfi1_vnic_down(vinfo); reopen = true; } vinfo->vesw_id = id; if (reopen) hfi1_vnic_up(vinfo); mutex_unlock(&vinfo->lock); } } /* netdev ops */ static const struct net_device_ops hfi1_netdev_ops = { .ndo_open = hfi1_netdev_open, .ndo_stop = hfi1_netdev_close, .ndo_start_xmit = hfi1_netdev_start_xmit, .ndo_select_queue = hfi1_vnic_select_queue, .ndo_get_stats64 = hfi1_vnic_get_stats64, }; static void hfi1_vnic_free_rn(struct net_device *netdev) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); hfi1_vnic_deinit(vinfo); mutex_destroy(&vinfo->lock); free_netdev(netdev); } struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, u32 port_num, enum rdma_netdev_t type, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *)) { struct hfi1_devdata *dd = dd_from_ibdev(device); struct hfi1_vnic_vport_info *vinfo; struct net_device *netdev; struct rdma_netdev *rn; int i, size, rc; if (!dd->num_netdev_contexts) return ERR_PTR(-ENOMEM); if (!port_num || (port_num > dd->num_pports)) return ERR_PTR(-EINVAL); if (type != RDMA_NETDEV_OPA_VNIC) return ERR_PTR(-EOPNOTSUPP); size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo); netdev = alloc_netdev_mqs(size, name, name_assign_type, setup, chip_sdma_engines(dd), dd->num_netdev_contexts); if (!netdev) return ERR_PTR(-ENOMEM); rn = netdev_priv(netdev); vinfo = opa_vnic_dev_priv(netdev); vinfo->dd = dd; vinfo->num_tx_q = chip_sdma_engines(dd); vinfo->num_rx_q = dd->num_netdev_contexts; vinfo->netdev = netdev; rn->free_rdma_netdev = hfi1_vnic_free_rn; rn->set_id = hfi1_vnic_set_vesw_id; netdev->features = NETIF_F_HIGHDMA | NETIF_F_SG; netdev->hw_features = netdev->features; netdev->vlan_features = netdev->features; netdev->watchdog_timeo = msecs_to_jiffies(HFI_TX_TIMEOUT_MS); netdev->netdev_ops = &hfi1_netdev_ops; mutex_init(&vinfo->lock); for (i = 0; i < vinfo->num_rx_q; i++) { struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i]; rxq->idx = i; rxq->vinfo = vinfo; rxq->netdev = netdev; } rc = hfi1_vnic_init(vinfo); if (rc) goto init_fail; return netdev; init_fail: mutex_destroy(&vinfo->lock); free_netdev(netdev); return ERR_PTR(rc); }
linux-master
drivers/infiniband/hw/hfi1/vnic_main.c
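hfi1_netdev_start_xmit() in the vnic_main.c record pads each frame plus its ICRC/tail bytes up to an 8-byte boundary, converts the result to dwords, and adds a 2-dword PBC to get the length handed to the send path. The sketch below walks that arithmetic for one example frame; ICRC_TAIL_LEN here is a placeholder and may not match the driver's OPA_VNIC_ICRC_TAIL_LEN.

#include <stdio.h>

#define ICRC_TAIL_LEN 8U	/* assumed ICRC + tail byte count, not the driver constant */

int main(void)
{
	unsigned int skb_len = 1514;	/* example Ethernet frame length */
	unsigned int pad_len, pkt_len, total_len;

	/* pad (frame + ICRC/tail) up to the next 8-byte boundary */
	pad_len = -(skb_len + ICRC_TAIL_LEN) & 0x7;
	pad_len += ICRC_TAIL_LEN;

	pkt_len = (skb_len + pad_len) >> 2;	/* packet length in dwords */
	total_len = pkt_len + 2;		/* plus the 2-dword PBC */

	printf("pad %u bytes, packet %u dwords, %u dwords with PBC\n",
	       pad_len, pkt_len, total_len);
	return 0;
}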
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright(c) 2020 Intel Corporation. * */ /* * This file contains HFI1 support for netdev RX functionality */ #include "sdma.h" #include "verbs.h" #include "netdev.h" #include "hfi.h" #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <rdma/ib_verbs.h> static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx, struct hfi1_ctxtdata *uctxt) { unsigned int rcvctrl_ops; struct hfi1_devdata *dd = rx->dd; int ret; uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions; uctxt->do_interrupt = &handle_receive_interrupt_napi_sp; /* Now allocate the RcvHdr queue and eager buffers. */ ret = hfi1_create_rcvhdrq(dd, uctxt); if (ret) goto done; ret = hfi1_setup_eagerbufs(uctxt); if (ret) goto done; clear_rcvhdrtail(uctxt); rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS; rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS; if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR)) rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB; if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL)) rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB; if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL)) rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB; if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL)) rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB; hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt); done: return ret; } static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata **ctxt) { struct hfi1_ctxtdata *uctxt; int ret; if (dd->flags & HFI1_FROZEN) return -EIO; ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt); if (ret < 0) { dd_dev_err(dd, "Unable to create ctxtdata, failing open\n"); return -ENOMEM; } uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) | HFI1_CAP_KGET(NODROP_RHQ_FULL) | HFI1_CAP_KGET(NODROP_EGR_FULL) | HFI1_CAP_KGET(DMA_RTAIL); /* Netdev contexts are always NO_RDMA_RTAIL */ uctxt->fast_handler = handle_receive_interrupt_napi_fp; uctxt->slow_handler = handle_receive_interrupt_napi_sp; hfi1_set_seq_cnt(uctxt, 1); uctxt->is_vnic = true; hfi1_stats.sps_ctxts++; dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt); *ctxt = uctxt; return 0; } static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt) { flush_wc(); /* * Disable receive context and interrupt available, reset all * RcvCtxtCtrl bits to default values. */ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_TIDFLOW_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS | HFI1_RCVCTRL_ONE_PKT_EGR_DIS | HFI1_RCVCTRL_NO_RHQ_DROP_DIS | HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt); if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS) msix_free_irq(dd, uctxt->msix_intr); uctxt->msix_intr = CCE_NUM_MSIX_VECTORS; uctxt->event_flags = 0; hfi1_clear_tids(uctxt); hfi1_clear_ctxt_pkey(dd, uctxt); hfi1_stats.sps_ctxts--; hfi1_free_ctxt(uctxt); } static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx, struct hfi1_ctxtdata **ctxt) { int rc; struct hfi1_devdata *dd = rx->dd; rc = hfi1_netdev_allocate_ctxt(dd, ctxt); if (rc) { dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc); return rc; } rc = hfi1_netdev_setup_ctxt(rx, *ctxt); if (rc) { dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc); hfi1_netdev_deallocate_ctxt(dd, *ctxt); *ctxt = NULL; } return rc; } /** * hfi1_num_netdev_contexts - Count of netdev recv contexts to use. 
* @dd: device on which to allocate netdev contexts * @available_contexts: count of available receive contexts * @cpu_mask: mask of possible cpus to include for contexts * * Return: count of physical cores on a node or the remaining available recv * contexts for netdev recv context usage up to the maximum of * HFI1_MAX_NETDEV_CTXTS. * A value of 0 can be returned when acceleration is explicitly turned off, * a memory allocation error occurs or when there are no available contexts. * */ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts, struct cpumask *cpu_mask) { cpumask_var_t node_cpu_mask; unsigned int available_cpus; if (!HFI1_CAP_IS_KSET(AIP)) return 0; /* Always give user contexts priority over netdev contexts */ if (available_contexts == 0) { dd_dev_info(dd, "No receive contexts available for netdevs.\n"); return 0; } if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) { dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n"); return 0; } cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node)); available_cpus = cpumask_weight(node_cpu_mask); free_cpumask_var(node_cpu_mask); return min3(available_cpus, available_contexts, (u32)HFI1_MAX_NETDEV_CTXTS); } static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx) { int i; int rc; struct hfi1_devdata *dd = rx->dd; struct net_device *dev = &rx->rx_napi; rx->num_rx_q = dd->num_netdev_contexts; rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq), GFP_KERNEL, dd->node); if (!rx->rxq) { dd_dev_err(dd, "Unable to allocate netdev queue data\n"); return (-ENOMEM); } for (i = 0; i < rx->num_rx_q; i++) { struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd); if (rc) goto bail_context_irq_failure; hfi1_rcd_get(rxq->rcd); rxq->rx = rx; rxq->rcd->napi = &rxq->napi; dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n", i, rxq->rcd->ctxt); /* * Disable BUSY_POLL on this NAPI as this is not supported * right now. 
*/ set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state); netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi); rc = msix_netdev_request_rcd_irq(rxq->rcd); if (rc) goto bail_context_irq_failure; } return 0; bail_context_irq_failure: dd_dev_err(dd, "Unable to allot receive context\n"); for (; i >= 0; i--) { struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; if (rxq->rcd) { hfi1_netdev_deallocate_ctxt(dd, rxq->rcd); hfi1_rcd_put(rxq->rcd); rxq->rcd = NULL; } } kfree(rx->rxq); rx->rxq = NULL; return rc; } static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx) { int i; struct hfi1_devdata *dd = rx->dd; for (i = 0; i < rx->num_rx_q; i++) { struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; netif_napi_del(&rxq->napi); hfi1_netdev_deallocate_ctxt(dd, rxq->rcd); hfi1_rcd_put(rxq->rcd); rxq->rcd = NULL; } kfree(rx->rxq); rx->rxq = NULL; rx->num_rx_q = 0; } static void enable_queues(struct hfi1_netdev_rx *rx) { int i; for (i = 0; i < rx->num_rx_q; i++) { struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i, rxq->rcd->ctxt); napi_enable(&rxq->napi); hfi1_rcvctrl(rx->dd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB, rxq->rcd); } } static void disable_queues(struct hfi1_netdev_rx *rx) { int i; msix_netdev_synchronize_irq(rx->dd); for (i = 0; i < rx->num_rx_q; i++) { struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i, rxq->rcd->ctxt); /* wait for napi if it was scheduled */ hfi1_rcvctrl(rx->dd, HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS, rxq->rcd); napi_synchronize(&rxq->napi); napi_disable(&rxq->napi); } } /** * hfi1_netdev_rx_init - Incrememnts netdevs counter. When called first time, * it allocates receive queue data and calls netif_napi_add * for each queue. * * @dd: hfi1 dev data */ int hfi1_netdev_rx_init(struct hfi1_devdata *dd) { struct hfi1_netdev_rx *rx = dd->netdev_rx; int res; if (atomic_fetch_inc(&rx->netdevs)) return 0; mutex_lock(&hfi1_mutex); res = hfi1_netdev_rxq_init(rx); mutex_unlock(&hfi1_mutex); return res; } /** * hfi1_netdev_rx_destroy - Decrements netdevs counter, when it reaches 0 * napi is deleted and receive queses memory is freed. * * @dd: hfi1 dev data */ int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd) { struct hfi1_netdev_rx *rx = dd->netdev_rx; /* destroy the RX queues only if it is the last netdev going away */ if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) { mutex_lock(&hfi1_mutex); hfi1_netdev_rxq_deinit(rx); mutex_unlock(&hfi1_mutex); } return 0; } /** * hfi1_alloc_rx - Allocates the rx support structure * @dd: hfi1 dev data * * Allocate the rx structure to support gathering the receive * resources and the dummy netdev. * * Updates dd struct pointer upon success. * * Return: 0 (success) -error on failure * */ int hfi1_alloc_rx(struct hfi1_devdata *dd) { struct hfi1_netdev_rx *rx; dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx)); rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node); if (!rx) return -ENOMEM; rx->dd = dd; init_dummy_netdev(&rx->rx_napi); xa_init(&rx->dev_tbl); atomic_set(&rx->enabled, 0); atomic_set(&rx->netdevs, 0); dd->netdev_rx = rx; return 0; } void hfi1_free_rx(struct hfi1_devdata *dd) { if (dd->netdev_rx) { dd_dev_info(dd, "hfi1 rx freed\n"); kfree(dd->netdev_rx); dd->netdev_rx = NULL; } } /** * hfi1_netdev_enable_queues - This is napi enable function. * It enables napi objects associated with queues. * When at least one device has called it it increments atomic counter. 
* Disable function decrements counter and when it is 0, * calls napi_disable for every queue. * * @dd: hfi1 dev data */ void hfi1_netdev_enable_queues(struct hfi1_devdata *dd) { struct hfi1_netdev_rx *rx; if (!dd->netdev_rx) return; rx = dd->netdev_rx; if (atomic_fetch_inc(&rx->enabled)) return; mutex_lock(&hfi1_mutex); enable_queues(rx); mutex_unlock(&hfi1_mutex); } void hfi1_netdev_disable_queues(struct hfi1_devdata *dd) { struct hfi1_netdev_rx *rx; if (!dd->netdev_rx) return; rx = dd->netdev_rx; if (atomic_dec_if_positive(&rx->enabled)) return; mutex_lock(&hfi1_mutex); disable_queues(rx); mutex_unlock(&hfi1_mutex); } /** * hfi1_netdev_add_data - Registers data with unique identifier * to be requested later this is needed for VNIC and IPoIB VLANs * implementations. * This call is protected by mutex idr_lock. * * @dd: hfi1 dev data * @id: requested integer id up to INT_MAX * @data: data to be associated with index */ int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data) { struct hfi1_netdev_rx *rx = dd->netdev_rx; return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT); } /** * hfi1_netdev_remove_data - Removes data with previously given id. * Returns the reference to removed entry. * * @dd: hfi1 dev data * @id: requested integer id up to INT_MAX */ void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id) { struct hfi1_netdev_rx *rx = dd->netdev_rx; return xa_erase(&rx->dev_tbl, id); } /** * hfi1_netdev_get_data - Gets data with given id * * @dd: hfi1 dev data * @id: requested integer id up to INT_MAX */ void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id) { struct hfi1_netdev_rx *rx = dd->netdev_rx; return xa_load(&rx->dev_tbl, id); } /** * hfi1_netdev_get_first_data - Gets first entry with greater or equal id. * * @dd: hfi1 dev data * @start_id: requested integer id up to INT_MAX */ void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id) { struct hfi1_netdev_rx *rx = dd->netdev_rx; unsigned long index = *start_id; void *ret; ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT); *start_id = (int)index; return ret; }
linux-master
drivers/infiniband/hw/hfi1/netdev_rx.c
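hfi1_netdev_rx_init() and hfi1_netdev_rx_destroy() in the netdev_rx.c record share one set of RX queues among several netdevs by building them only on the counter's 0 to 1 transition and tearing them down only on the 1 to 0 transition. The sketch below models that first-user/last-user pattern with C11 atomics standing in for the kernel atomic_t helpers; the printf calls mark where the real code allocates and frees.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int netdevs;

static void rx_init(void)
{
	/* Only the first caller (0 -> 1) builds the shared RX queues. */
	if (atomic_fetch_add(&netdevs, 1) == 0)
		printf("allocating shared RX queues\n");
}

static void rx_destroy(void)
{
	/* Decrement unless already zero, like atomic_fetch_add_unless(). */
	int old = atomic_load(&netdevs);

	while (old > 0 &&
	       !atomic_compare_exchange_weak(&netdevs, &old, old - 1))
		;	/* retry with the refreshed value in old */
	if (old == 1)	/* last user (1 -> 0) frees the queues */
		printf("freeing shared RX queues\n");
}

int main(void)
{
	rx_init();	/* allocates */
	rx_init();	/* no-op */
	rx_destroy();	/* no-op */
	rx_destroy();	/* frees */
	return 0;
}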
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2016 - 2018 Intel Corporation. */ #include "hfi.h" #include "verbs_txreq.h" #include "qp.h" #include "trace.h" #define TXREQ_LEN 24 void hfi1_put_txreq(struct verbs_txreq *tx) { struct hfi1_ibdev *dev; struct rvt_qp *qp; unsigned long flags; unsigned int seq; struct hfi1_qp_priv *priv; qp = tx->qp; dev = to_idev(qp->ibqp.device); if (tx->mr) rvt_put_mr(tx->mr); sdma_txclean(dd_from_dev(dev), &tx->txreq); /* Free verbs_txreq and return to slab cache */ kmem_cache_free(dev->verbs_txreq_cache, tx); do { seq = read_seqbegin(&dev->txwait_lock); if (!list_empty(&dev->txwait)) { struct iowait *wait; write_seqlock_irqsave(&dev->txwait_lock, flags); wait = list_first_entry(&dev->txwait, struct iowait, list); qp = iowait_to_qp(wait); priv = qp->priv; list_del_init(&priv->s_iowait.list); /* refcount held until actual wake up */ write_sequnlock_irqrestore(&dev->txwait_lock, flags); hfi1_qp_wakeup(qp, RVT_S_WAIT_TX); break; } } while (read_seqretry(&dev->txwait_lock, seq)); } struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) __must_hold(&qp->s_lock) { struct verbs_txreq *tx = NULL; write_seqlock(&dev->txwait_lock); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { struct hfi1_qp_priv *priv; tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP); if (tx) goto out; priv = qp->priv; if (list_empty(&priv->s_iowait.list)) { dev->n_txwait++; qp->s_flags |= RVT_S_WAIT_TX; list_add_tail(&priv->s_iowait.list, &dev->txwait); priv->s_iowait.lock = &dev->txwait_lock; trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX); rvt_get_qp(qp); } qp->s_flags &= ~RVT_S_BUSY; } out: write_sequnlock(&dev->txwait_lock); return tx; } int verbs_txreq_init(struct hfi1_ibdev *dev) { char buf[TXREQ_LEN]; struct hfi1_devdata *dd = dd_from_dev(dev); snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit); dev->verbs_txreq_cache = kmem_cache_create(buf, sizeof(struct verbs_txreq), 0, SLAB_HWCACHE_ALIGN, NULL); if (!dev->verbs_txreq_cache) return -ENOMEM; return 0; } void verbs_txreq_exit(struct hfi1_ibdev *dev) { kmem_cache_destroy(dev->verbs_txreq_cache); dev->verbs_txreq_cache = NULL; }
linux-master
drivers/infiniband/hw/hfi1/verbs_txreq.c
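__get_txreq() and hfi1_put_txreq() in the verbs_txreq.c record fall back to a wait list when the txreq slab cache cannot satisfy an allocation: the requesting QP parks itself, and the free path wakes the first waiter. The userspace sketch below mirrors that shape under a plain mutex; the cache, waiter type, and wake-by-printf are stand-ins, and the driver's seqlock and QP refcounting details are intentionally omitted.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct waiter {
	const char *name;
	STAILQ_ENTRY(waiter) link;
};

static STAILQ_HEAD(, waiter) txwait = STAILQ_HEAD_INITIALIZER(txwait);
static pthread_mutex_t txwait_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for kmem_cache_alloc(); fail on demand to exercise the wait path. */
static void *cache_alloc(int fail)
{
	return fail ? NULL : malloc(64);
}

static void *get_txreq(struct waiter *w, int fail)
{
	void *tx;

	pthread_mutex_lock(&txwait_lock);
	tx = cache_alloc(fail);
	if (!tx) {
		/* no txreq available: park the requester on the wait list */
		STAILQ_INSERT_TAIL(&txwait, w, link);
		printf("%s queued on txwait\n", w->name);
	}
	pthread_mutex_unlock(&txwait_lock);
	return tx;
}

static void put_txreq(void *tx)
{
	struct waiter *w = NULL;

	free(tx);
	pthread_mutex_lock(&txwait_lock);
	if (!STAILQ_EMPTY(&txwait)) {
		/* wake the first waiter now that a txreq has been returned */
		w = STAILQ_FIRST(&txwait);
		STAILQ_REMOVE_HEAD(&txwait, link);
	}
	pthread_mutex_unlock(&txwait_lock);
	if (w)
		printf("%s woken, may retry allocation\n", w->name);
}

int main(void)
{
	struct waiter qp = { .name = "qp0" };
	void *tx = get_txreq(&qp, 0);	/* succeeds */
	void *busy = get_txreq(&qp, 1);	/* fails, qp0 parks on txwait */

	(void)busy;
	put_txreq(tx);			/* frees and wakes qp0 */
	return 0;
}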
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ #include <linux/firmware.h> #include "hfi.h" #include "efivar.h" #include "eprom.h" #define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat" static int validate_scratch_checksum(struct hfi1_devdata *dd) { u64 checksum = 0, temp_scratch = 0; int i, j, version; temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH); version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT; /* Prevent power on default of all zeroes from passing checksum */ if (!version) { dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__); dd_dev_err(dd, "%s: Please update your BIOS to support active channels\n", __func__); return 0; } /* * ASIC scratch 0 only contains the checksum and bitmap version as * fields of interest, both of which are handled separately from the * loop below, so skip it */ checksum += version; for (i = 1; i < ASIC_NUM_SCRATCH; i++) { temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i)); for (j = sizeof(u64); j != 0; j -= 2) { checksum += (temp_scratch & 0xFFFF); temp_scratch >>= 16; } } while (checksum >> 16) checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16); temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH); temp_scratch &= CHECKSUM_SMASK; temp_scratch >>= CHECKSUM_SHIFT; if (checksum + temp_scratch == 0xFFFF) return 1; dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__); return 0; } static void save_platform_config_fields(struct hfi1_devdata *dd) { struct hfi1_pportdata *ppd = dd->pport; u64 temp_scratch = 0, temp_dest = 0; temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1); temp_dest = temp_scratch & (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK : PORT0_PORT_TYPE_SMASK); ppd->port_type = temp_dest >> (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT : PORT0_PORT_TYPE_SHIFT); temp_dest = temp_scratch & (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK : PORT0_LOCAL_ATTEN_SMASK); ppd->local_atten = temp_dest >> (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT : PORT0_LOCAL_ATTEN_SHIFT); temp_dest = temp_scratch & (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK : PORT0_REMOTE_ATTEN_SMASK); ppd->remote_atten = temp_dest >> (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT : PORT0_REMOTE_ATTEN_SHIFT); temp_dest = temp_scratch & (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK : PORT0_DEFAULT_ATTEN_SMASK); ppd->default_atten = temp_dest >> (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT : PORT0_DEFAULT_ATTEN_SHIFT); temp_scratch = read_csr(dd, dd->hfi1_id ? 
ASIC_CFG_SCRATCH_3 : ASIC_CFG_SCRATCH_2); ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT; ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT; ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT; ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >> QSFP_MAX_POWER_SHIFT; ppd->config_from_scratch = true; } void get_platform_config(struct hfi1_devdata *dd) { int ret = 0; u8 *temp_platform_config = NULL; u32 esize; const struct firmware *platform_config_file = NULL; if (is_integrated(dd)) { if (validate_scratch_checksum(dd)) { save_platform_config_fields(dd); return; } } else { ret = eprom_read_platform_config(dd, (void **)&temp_platform_config, &esize); if (!ret) { /* success */ dd->platform_config.data = temp_platform_config; dd->platform_config.size = esize; return; } } dd_dev_err(dd, "%s: Failed to get platform config, falling back to sub-optimal default file\n", __func__); ret = request_firmware(&platform_config_file, DEFAULT_PLATFORM_CONFIG_NAME, &dd->pcidev->dev); if (ret) { dd_dev_err(dd, "%s: No default platform config file found\n", __func__); return; } /* * Allocate separate memory block to store data and free firmware * structure. This allows free_platform_config to treat EPROM and * fallback configs in the same manner. */ dd->platform_config.data = kmemdup(platform_config_file->data, platform_config_file->size, GFP_KERNEL); dd->platform_config.size = platform_config_file->size; release_firmware(platform_config_file); } void free_platform_config(struct hfi1_devdata *dd) { /* Release memory allocated for eprom or fallback file read. */ kfree(dd->platform_config.data); dd->platform_config.data = NULL; } void get_port_type(struct hfi1_pportdata *ppd) { int ret; u32 temp; ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_PORT_TYPE, &temp, 4); if (ret) { ppd->port_type = PORT_TYPE_UNKNOWN; return; } ppd->port_type = temp; } int set_qsfp_tx(struct hfi1_pportdata *ppd, int on) { u8 tx_ctrl_byte = on ? 
0x0 : 0xF; int ret = 0; ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS, &tx_ctrl_byte, 1); /* we expected 1, so consider 0 an error */ if (ret == 0) ret = -EIO; else if (ret == 1) ret = 0; return ret; } static int qual_power(struct hfi1_pportdata *ppd) { u32 cable_power_class = 0, power_class_max = 0; u8 *cache = ppd->qsfp_info.cache; int ret = 0; ret = get_platform_config_field( ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0, SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4); if (ret) return ret; cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); if (cable_power_class > power_class_max) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY); if (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) { dd_dev_err( ppd->dd, "%s: Port disabled due to system power restrictions\n", __func__); ret = -EPERM; } return ret; } static int qual_bitrate(struct hfi1_pportdata *ppd) { u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled; u8 *cache = ppd->qsfp_info.cache; if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) && cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY); if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) && cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY); if (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) { dd_dev_err( ppd->dd, "%s: Cable failed bitrate check, disabling port\n", __func__); return -EPERM; } return 0; } static int set_qsfp_high_power(struct hfi1_pportdata *ppd) { u8 cable_power_class = 0, power_ctrl_byte = 0; u8 *cache = ppd->qsfp_info.cache; int ret; cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); if (cable_power_class > QSFP_POWER_CLASS_1) { power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS]; power_ctrl_byte |= 1; power_ctrl_byte &= ~(0x2); ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_PWR_CTRL_BYTE_OFFS, &power_ctrl_byte, 1); if (ret != 1) return -EIO; if (cable_power_class > QSFP_POWER_CLASS_4) { power_ctrl_byte |= (1 << 2); ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_PWR_CTRL_BYTE_OFFS, &power_ctrl_byte, 1); if (ret != 1) return -EIO; } /* SFF 8679 rev 1.7 LPMode Deassert time */ msleep(300); } return 0; } static void apply_rx_cdr(struct hfi1_pportdata *ppd, u32 rx_preset_index, u8 *cdr_ctrl_byte) { u32 rx_preset; u8 *cache = ppd->qsfp_info.cache; int cable_power_class; if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) && (cache[QSFP_CDR_INFO_OFFS] & 0x40))) return; /* RX CDR present, bypass supported */ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); if (cable_power_class <= QSFP_POWER_CLASS_3) { /* Power class <= 3, ignore config & turn RX CDR on */ *cdr_ctrl_byte |= 0xF; return; } get_platform_config_field( ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE, rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY, &rx_preset, 4); if (!rx_preset) { dd_dev_info( ppd->dd, "%s: RX_CDR_APPLY is set to disabled\n", __func__); return; } get_platform_config_field( ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE, rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR, &rx_preset, 4); /* Expand cdr setting to all 4 lanes */ rx_preset = (rx_preset | (rx_preset << 1) | (rx_preset << 2) | (rx_preset << 3)); if (rx_preset) { *cdr_ctrl_byte |= rx_preset; } else { *cdr_ctrl_byte &= rx_preset; /* Preserve current TX CDR status */ *cdr_ctrl_byte |= 
(cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0); } } static void apply_tx_cdr(struct hfi1_pportdata *ppd, u32 tx_preset_index, u8 *cdr_ctrl_byte) { u32 tx_preset; u8 *cache = ppd->qsfp_info.cache; int cable_power_class; if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) && (cache[QSFP_CDR_INFO_OFFS] & 0x80))) return; /* TX CDR present, bypass supported */ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); if (cable_power_class <= QSFP_POWER_CLASS_3) { /* Power class <= 3, ignore config & turn TX CDR on */ *cdr_ctrl_byte |= 0xF0; return; } get_platform_config_field( ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4); if (!tx_preset) { dd_dev_info( ppd->dd, "%s: TX_CDR_APPLY is set to disabled\n", __func__); return; } get_platform_config_field( ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4); /* Expand cdr setting to all 4 lanes */ tx_preset = (tx_preset | (tx_preset << 1) | (tx_preset << 2) | (tx_preset << 3)); if (tx_preset) *cdr_ctrl_byte |= (tx_preset << 4); else /* Preserve current/determined RX CDR status */ *cdr_ctrl_byte &= ((tx_preset << 4) | 0xF); } static void apply_cdr_settings( struct hfi1_pportdata *ppd, u32 rx_preset_index, u32 tx_preset_index) { u8 *cache = ppd->qsfp_info.cache; u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS]; apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte); apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte); qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS, &cdr_ctrl_byte, 1); } static void apply_tx_eq_auto(struct hfi1_pportdata *ppd) { u8 *cache = ppd->qsfp_info.cache; u8 tx_eq; if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8)) return; /* Disable adaptive TX EQ if present */ tx_eq = cache[(128 * 3) + 241]; tx_eq &= 0xF0; qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1); } static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index) { u8 *cache = ppd->qsfp_info.cache; u32 tx_preset; u8 tx_eq; if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4)) return; get_platform_config_field( ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY, &tx_preset, 4); if (!tx_preset) { dd_dev_info( ppd->dd, "%s: TX_EQ_APPLY is set to disabled\n", __func__); return; } get_platform_config_field( ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ, &tx_preset, 4); if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) { dd_dev_info( ppd->dd, "%s: TX EQ %x unsupported\n", __func__, tx_preset); dd_dev_info( ppd->dd, "%s: Applying EQ %x\n", __func__, cache[608] & 0xF0); tx_preset = (cache[608] & 0xF0) >> 4; } tx_eq = tx_preset | (tx_preset << 4); qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1); qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1); } static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index) { u32 rx_preset; u8 rx_eq, *cache = ppd->qsfp_info.cache; if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2)) return; get_platform_config_field( ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE, rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY, &rx_preset, 4); if (!rx_preset) { dd_dev_info( ppd->dd, "%s: RX_EMP_APPLY is set to disabled\n", __func__); return; } get_platform_config_field( ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE, rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP, &rx_preset, 4); if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) { dd_dev_info( ppd->dd, "%s: Requested RX EMP %x\n", __func__, rx_preset); dd_dev_info( ppd->dd, "%s: Applying supported EMP 
%x\n", __func__, cache[608] & 0xF); rx_preset = cache[608] & 0xF; } rx_eq = rx_preset | (rx_preset << 4); qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1); qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1); } static void apply_eq_settings(struct hfi1_pportdata *ppd, u32 rx_preset_index, u32 tx_preset_index) { u8 *cache = ppd->qsfp_info.cache; /* no point going on w/o a page 3 */ if (cache[2] & 4) { dd_dev_info(ppd->dd, "%s: Upper page 03 not present\n", __func__); return; } apply_tx_eq_auto(ppd); apply_tx_eq_prog(ppd, tx_preset_index); apply_rx_eq_emp(ppd, rx_preset_index); } static void apply_rx_amplitude_settings( struct hfi1_pportdata *ppd, u32 rx_preset_index, u32 tx_preset_index) { u32 rx_preset; u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache; /* no point going on w/o a page 3 */ if (cache[2] & 4) { dd_dev_info(ppd->dd, "%s: Upper page 03 not present\n", __func__); return; } if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) { dd_dev_info(ppd->dd, "%s: RX_AMP_APPLY is set to disabled\n", __func__); return; } get_platform_config_field(ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE, rx_preset_index, RX_PRESET_TABLE_QSFP_RX_AMP_APPLY, &rx_preset, 4); if (!rx_preset) { dd_dev_info(ppd->dd, "%s: RX_AMP_APPLY is set to disabled\n", __func__); return; } get_platform_config_field(ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE, rx_preset_index, RX_PRESET_TABLE_QSFP_RX_AMP, &rx_preset, 4); dd_dev_info(ppd->dd, "%s: Requested RX AMP %x\n", __func__, rx_preset); for (i = 0; i < 4; i++) { if (cache[(128 * 3) + 225] & (1 << i)) { preferred = i; if (preferred == rx_preset) break; } } /* * Verify that preferred RX amplitude is not just a * fall through of the default */ if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) { dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n"); return; } dd_dev_info(ppd->dd, "%s: Applying RX AMP %x\n", __func__, preferred); rx_amp = preferred | (preferred << 4); qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1); qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1); } #define OPA_INVALID_INDEX 0xFFF static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id, u32 config_data, const char *message) { u8 i; int ret; for (i = 0; i < 4; i++) { ret = load_8051_config(ppd->dd, field_id, i, config_data); if (ret != HCMD_SUCCESS) { dd_dev_err( ppd->dd, "%s: %s for lane %u failed\n", message, __func__, i); } } } /* * Return a special SerDes setting for low power AOC cables. The power class * threshold and setting being used were all found by empirical testing. * * Summary of the logic: * * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4) * return 0xe * return 0; // leave at default */ static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd) { u8 *cache = ppd->qsfp_info.cache; int power_class; /* QSFP only */ if (ppd->port_type != PORT_TYPE_QSFP) return 0; /* leave at default */ /* active optical cables only */ switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) { case 0x0 ... 
0x9: fallthrough; case 0xC: fallthrough; case 0xE: /* active AOC */ power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); if (power_class < QSFP_POWER_CLASS_4) return 0xe; } return 0; /* leave at default */ } static void apply_tunings( struct hfi1_pportdata *ppd, u32 tx_preset_index, u8 tuning_method, u32 total_atten, u8 limiting_active) { int ret = 0; u32 config_data = 0, tx_preset = 0; u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0; u8 *cache = ppd->qsfp_info.cache; /* Pass tuning method to 8051 */ read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, &config_data); config_data &= ~(0xff << TUNING_METHOD_SHIFT); config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT); ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, config_data); if (ret != HCMD_SUCCESS) dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n", __func__); /* Set same channel loss for both TX and RX */ config_data = 0 | (total_atten << 16) | (total_atten << 24); apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data, "Setting channel loss"); /* Inform 8051 of cable capabilities */ if (ppd->qsfp_info.cache_valid) { external_device_config = ((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) | ((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) | ((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) | (cache[QSFP_EQ_INFO_OFFS] & 0x4); ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, GENERAL_CONFIG, &config_data); /* Clear, then set the external device config field */ config_data &= ~(u32)0xFF; config_data |= external_device_config; ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, GENERAL_CONFIG, config_data); if (ret != HCMD_SUCCESS) dd_dev_err(ppd->dd, "%s: Failed set ext device config params\n", __func__); } if (tx_preset_index == OPA_INVALID_INDEX) { if (ppd->port_type == PORT_TYPE_QSFP && limiting_active) dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n", __func__); return; } /* Following for limiting active channels only */ get_platform_config_field( ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, TX_PRESET_TABLE_PRECUR, &tx_preset, 4); precur = tx_preset; get_platform_config_field( ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4); attn = tx_preset; get_platform_config_field( ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4); postcur = tx_preset; /* * NOTES: * o The aoc_low_power_setting is applied to all lanes even * though only lane 0's value is examined by the firmware. * o A lingering low power setting after a cable swap does * not occur. On cable unplug the 8051 is reset and * restarted on cable insert. This resets all settings to * their default, erasing any previous low power setting. */ config_data = precur | (attn << 8) | (postcur << 16) | (aoc_low_power_setting(ppd) << 24); apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data, "Applying TX settings"); } /* Must be holding the QSFP i2c resource */ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, u32 *ptr_rx_preset, u32 *ptr_total_atten) { int ret; u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled; u8 *cache = ppd->qsfp_info.cache; ppd->qsfp_info.limiting_active = 1; ret = set_qsfp_tx(ppd, 0); if (ret) return ret; ret = qual_power(ppd); if (ret) return ret; ret = qual_bitrate(ppd); if (ret) return ret; /* * We'll change the QSFP memory contents from here on out, thus we set a * flag here to remind ourselves to reset the QSFP module. 
This prevents * reuse of stale settings established in our previous pass through. */ if (ppd->qsfp_info.reset_needed) { ret = reset_qsfp(ppd); if (ret) return ret; refresh_qsfp_cache(ppd, &ppd->qsfp_info); } else { ppd->qsfp_info.reset_needed = 1; } ret = set_qsfp_high_power(ppd); if (ret) return ret; if (cache[QSFP_EQ_INFO_OFFS] & 0x4) { ret = get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ, ptr_tx_preset, 4); if (ret) { *ptr_tx_preset = OPA_INVALID_INDEX; return ret; } } else { ret = get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ, ptr_tx_preset, 4); if (ret) { *ptr_tx_preset = OPA_INVALID_INDEX; return ret; } } ret = get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4); if (ret) { *ptr_rx_preset = OPA_INVALID_INDEX; return ret; } if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G)) get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4); else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G)) get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4); apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset); apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset); apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset); ret = set_qsfp_tx(ppd, 1); return ret; } static int tune_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, u32 *ptr_rx_preset, u8 *ptr_tuning_method, u32 *ptr_total_atten) { u32 cable_atten = 0, remote_atten = 0, platform_atten = 0; u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled; int ret = 0; u8 *cache = ppd->qsfp_info.cache; switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) { case 0xA ... 0xB: ret = get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4); if (ret) return ret; if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G)) cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS]; else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G)) cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS]; /* Fallback to configured attenuation if cable memory is bad */ if (cable_atten == 0 || cable_atten > 36) { ret = get_platform_config_field( ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0, SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G, &cable_atten, 4); if (ret) return ret; } ret = get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4); if (ret) return ret; *ptr_total_atten = platform_atten + cable_atten + remote_atten; *ptr_tuning_method = OPA_PASSIVE_TUNING; break; case 0x0 ... 0x9: fallthrough; case 0xC: fallthrough; case 0xE: ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset, ptr_total_atten); if (ret) return ret; *ptr_tuning_method = OPA_ACTIVE_TUNING; break; case 0xD: fallthrough; case 0xF: default: dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n", __func__); break; } return ret; } /* * This function communicates its success or failure via ppd->driver_link_ready * Thus, it depends on its association with start_link(...) which checks * driver_link_ready before proceeding with the link negotiation and * initialization process. 
*/ void tune_serdes(struct hfi1_pportdata *ppd) { int ret = 0; u32 total_atten = 0; u32 remote_atten = 0, platform_atten = 0; u32 rx_preset_index, tx_preset_index; u8 tuning_method = 0, limiting_active = 0; struct hfi1_devdata *dd = ppd->dd; rx_preset_index = OPA_INVALID_INDEX; tx_preset_index = OPA_INVALID_INDEX; /* the link defaults to enabled */ ppd->link_enabled = 1; /* the driver link ready state defaults to not ready */ ppd->driver_link_ready = 0; ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE); /* Skip the tuning for testing (loopback != none) and simulations */ if (loopback != LOOPBACK_NONE || ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { ppd->driver_link_ready = 1; if (qsfp_mod_present(ppd)) { ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT); if (ret) { dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n", __func__, (int)ppd->dd->hfi1_id); goto bail; } refresh_qsfp_cache(ppd, &ppd->qsfp_info); release_chip_resource(ppd->dd, qsfp_resource(ppd->dd)); } return; } switch (ppd->port_type) { case PORT_TYPE_DISCONNECTED: ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED); dd_dev_warn(dd, "%s: Port disconnected, disabling port\n", __func__); goto bail; case PORT_TYPE_FIXED: /* platform_atten, remote_atten pre-zeroed to catch error */ get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4); get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4); total_atten = platform_atten + remote_atten; tuning_method = OPA_PASSIVE_TUNING; break; case PORT_TYPE_VARIABLE: if (qsfp_mod_present(ppd)) { /* * platform_atten, remote_atten pre-zeroed to * catch error */ get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4); get_platform_config_field( ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4); total_atten = platform_atten + remote_atten; tuning_method = OPA_PASSIVE_TUNING; } else { ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG); goto bail; } break; case PORT_TYPE_QSFP: if (qsfp_mod_present(ppd)) { ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT); if (ret) { dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n", __func__, (int)ppd->dd->hfi1_id); goto bail; } refresh_qsfp_cache(ppd, &ppd->qsfp_info); if (ppd->qsfp_info.cache_valid) { ret = tune_qsfp(ppd, &tx_preset_index, &rx_preset_index, &tuning_method, &total_atten); /* * We may have modified the QSFP memory, so * update the cache to reflect the changes */ refresh_qsfp_cache(ppd, &ppd->qsfp_info); limiting_active = ppd->qsfp_info.limiting_active; } else { dd_dev_err(dd, "%s: Reading QSFP memory failed\n", __func__); ret = -EINVAL; /* a fail indication */ } release_chip_resource(ppd->dd, qsfp_resource(ppd->dd)); if (ret) goto bail; } else { ppd->offline_disabled_reason = HFI1_ODR_MASK( OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED); goto bail; } break; default: dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__); ppd->port_type = PORT_TYPE_UNKNOWN; tuning_method = OPA_UNKNOWN_TUNING; total_atten = 0; limiting_active = 0; tx_preset_index = OPA_INVALID_INDEX; break; } if (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)) apply_tunings(ppd, tx_preset_index, tuning_method, total_atten, limiting_active); if (!ret) ppd->driver_link_ready = 1; return; bail: ppd->driver_link_ready = 
0; }
linux-master
drivers/infiniband/hw/hfi1/platform.c
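The CDR handling in platform.c above packs per-lane enables into a single QSFP control byte: the low nibble carries the four RX CDR lanes, the high nibble the four TX CDR lanes, and a one-bit preset read from the platform config is fanned out to all four lanes before being merged while the other nibble is preserved (see apply_rx_cdr() and apply_tx_cdr(), which both write QSFP_CDR_CTRL_BYTE_OFFS). The following is a minimal, self-contained sketch of that bit handling only; set_cdr_nibble() and the values used are illustrative and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified sketch: fan a 0/1 CDR preset out to all four lanes of one
 * nibble of a CDR control byte while preserving the other nibble, in the
 * spirit of apply_rx_cdr()/apply_tx_cdr() above (which additionally read
 * the preset from the platform config tables).
 */
static uint8_t set_cdr_nibble(uint8_t ctrl, unsigned int preset, int tx)
{
	/* Expand the single-bit preset to a 4-lane mask (0x0 or 0xF). */
	uint8_t lanes = preset ? 0xF : 0x0;

	if (tx)	/* TX CDR enables live in the high nibble */
		return (ctrl & 0x0F) | (uint8_t)(lanes << 4);
	/* RX CDR enables live in the low nibble */
	return (ctrl & 0xF0) | lanes;
}

int main(void)
{
	uint8_t ctrl = 0xA5;	/* arbitrary starting control byte */

	ctrl = set_cdr_nibble(ctrl, 1, 0);	/* turn RX CDR on, all lanes */
	ctrl = set_cdr_nibble(ctrl, 0, 1);	/* turn TX CDR off, all lanes */
	printf("cdr ctrl byte: 0x%02x\n", (unsigned int)ctrl);	/* 0x0f */
	return 0;
}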
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright(c) 2020 Intel Corporation. * */ /* * This file contains HFI1 support for IPOIB SDMA functionality */ #include <linux/log2.h> #include <linux/circ_buf.h> #include "sdma.h" #include "verbs.h" #include "trace_ibhdrs.h" #include "ipoib.h" #include "trace_tx.h" /* Add a convenience helper */ #define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1)) #define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size) #define CIRC_PREV(val, size) CIRC_ADD(val, -1, size) struct ipoib_txparms { struct hfi1_devdata *dd; struct rdma_ah_attr *ah_attr; struct hfi1_ibport *ibp; struct hfi1_ipoib_txq *txq; union hfi1_ipoib_flow flow; u32 dqpn; u8 hdr_dwords; u8 entropy; }; static struct ipoib_txreq * hfi1_txreq_from_idx(struct hfi1_ipoib_circ_buf *r, u32 idx) { return (struct ipoib_txreq *)(r->items + (idx << r->shift)); } static u32 hfi1_ipoib_txreqs(const u64 sent, const u64 completed) { return sent - completed; } static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) { return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs); } static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) { trace_hfi1_txq_stop(txq); if (atomic_inc_return(&txq->tx_ring.stops) == 1) netif_stop_subqueue(txq->priv->netdev, txq->q_idx); } static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) { trace_hfi1_txq_wake(txq); if (atomic_dec_and_test(&txq->tx_ring.stops)) netif_wake_subqueue(txq->priv->netdev, txq->q_idx); } static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq) { return min_t(uint, txq->priv->netdev->tx_queue_len, txq->tx_ring.max_items - 1); } static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq) { return min_t(uint, txq->priv->netdev->tx_queue_len, txq->tx_ring.max_items) >> 1; } static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq) { ++txq->tx_ring.sent_txreqs; if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) && !atomic_xchg(&txq->tx_ring.ring_full, 1)) { trace_hfi1_txq_full(txq); hfi1_ipoib_stop_txq(txq); } } static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) { struct net_device *dev = txq->priv->netdev; /* If shutting down just return as queue state is irrelevant */ if (unlikely(dev->reg_state != NETREG_REGISTERED)) return; /* * When the queue has been drained to less than half full it will be * restarted. * The size of the txreq ring is fixed at initialization. * The tx queue len can be adjusted upward while the interface is * running. * The tx queue len can be large enough to overflow the txreq_ring. * Use the minimum of the current tx_queue_len or the rings max txreqs * to protect against ring overflow. 
*/ if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) && atomic_xchg(&txq->tx_ring.ring_full, 0)) { trace_hfi1_txq_xmit_unstopped(txq); hfi1_ipoib_wake_txq(txq); } } static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) { struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; if (likely(!tx->sdma_status)) { dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len); } else { ++priv->netdev->stats.tx_errors; dd_dev_warn(priv->dd, "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n", __func__, tx->sdma_status, le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx, tx->txq->sde->this_idx); } napi_consume_skb(tx->skb, budget); tx->skb = NULL; sdma_txclean(priv->dd, &tx->txreq); } static void hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq) { struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; int i; struct ipoib_txreq *tx; for (i = 0; i < tx_ring->max_items; i++) { tx = hfi1_txreq_from_idx(tx_ring, i); tx->complete = 0; dev_kfree_skb_any(tx->skb); tx->skb = NULL; sdma_txclean(txq->priv->dd, &tx->txreq); } tx_ring->head = 0; tx_ring->tail = 0; tx_ring->complete_txreqs = 0; tx_ring->sent_txreqs = 0; tx_ring->avail = hfi1_ipoib_ring_hwat(txq); } static int hfi1_ipoib_poll_tx_ring(struct napi_struct *napi, int budget) { struct hfi1_ipoib_txq *txq = container_of(napi, struct hfi1_ipoib_txq, napi); struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; u32 head = tx_ring->head; u32 max_tx = tx_ring->max_items; int work_done; struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, head); trace_hfi1_txq_poll(txq); for (work_done = 0; work_done < budget; work_done++) { /* See hfi1_ipoib_sdma_complete() */ if (!smp_load_acquire(&tx->complete)) break; tx->complete = 0; trace_hfi1_tx_produce(tx, head); hfi1_ipoib_free_tx(tx, budget); head = CIRC_NEXT(head, max_tx); tx = hfi1_txreq_from_idx(tx_ring, head); } tx_ring->complete_txreqs += work_done; /* Finished freeing tx items so store the head value. 
*/ smp_store_release(&tx_ring->head, head); hfi1_ipoib_check_queue_stopped(txq); if (work_done < budget) napi_complete_done(napi, work_done); return work_done; } static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status) { struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq); trace_hfi1_txq_complete(tx->txq); tx->sdma_status = status; /* see hfi1_ipoib_poll_tx_ring */ smp_store_release(&tx->complete, 1); napi_schedule_irqoff(&tx->txq->napi); } static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx, struct ipoib_txparms *txp) { struct hfi1_devdata *dd = txp->dd; struct sdma_txreq *txreq = &tx->txreq; struct sk_buff *skb = tx->skb; int ret = 0; int i; if (skb_headlen(skb)) { ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb)); if (unlikely(ret)) return ret; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ret = sdma_txadd_page(dd, txreq, skb_frag_page(frag), frag->bv_offset, skb_frag_size(frag), NULL, NULL, NULL); if (unlikely(ret)) break; } return ret; } static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx, struct ipoib_txparms *txp) { struct hfi1_devdata *dd = txp->dd; struct sdma_txreq *txreq = &tx->txreq; struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; u16 pkt_bytes = sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len; int ret; ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete); if (unlikely(ret)) return ret; /* add pbc + headers */ ret = sdma_txadd_kvaddr(dd, txreq, sdma_hdr, sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2)); if (unlikely(ret)) return ret; /* add the ulp payload */ return hfi1_ipoib_build_ulp_payload(tx, txp); } static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx, struct ipoib_txparms *txp) { struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; struct sk_buff *skb = tx->skb; struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp); struct rdma_ah_attr *ah_attr = txp->ah_attr; struct ib_other_headers *ohdr; struct ib_grh *grh; u16 dwords; u16 slid; u16 dlid; u16 lrh0; u32 bth0; u32 sqpn = (u32)(priv->netdev->dev_addr[1] << 16 | priv->netdev->dev_addr[2] << 8 | priv->netdev->dev_addr[3]); u16 payload_dwords; u8 pad_cnt; pad_cnt = -skb->len & 3; /* Includes ICRC */ payload_dwords = ((skb->len + pad_cnt) >> 2) + SIZE_OF_CRC; /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. 
*/ txp->hdr_dwords = 7; if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { grh = &sdma_hdr->hdr.ibh.u.l.grh; txp->hdr_dwords += hfi1_make_grh(txp->ibp, grh, rdma_ah_read_grh(ah_attr), txp->hdr_dwords - LRH_9B_DWORDS, payload_dwords); lrh0 = HFI1_LRH_GRH; ohdr = &sdma_hdr->hdr.ibh.u.l.oth; } else { lrh0 = HFI1_LRH_BTH; ohdr = &sdma_hdr->hdr.ibh.u.oth; } lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4; lrh0 |= (txp->flow.sc5 & 0xf) << 12; dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B); if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) { slid = be16_to_cpu(IB_LID_PERMISSIVE); } else { u16 lid = (u16)ppd->lid; if (lid) { lid |= rdma_ah_get_path_bits(ah_attr) & ((1 << ppd->lmc) - 1); slid = lid; } else { slid = be16_to_cpu(IB_LID_PERMISSIVE); } } /* Includes ICRC */ dwords = txp->hdr_dwords + payload_dwords; /* Build the lrh */ sdma_hdr->hdr.hdr_type = HFI1_PKT_TYPE_9B; hfi1_make_ib_hdr(&sdma_hdr->hdr.ibh, lrh0, dwords, dlid, slid); /* Build the bth */ bth0 = (IB_OPCODE_UD_SEND_ONLY << 24) | (pad_cnt << 20) | priv->pkey; ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(txp->dqpn); ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->tx_ring.sent_txreqs)); /* Build the deth */ ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey); ohdr->u.ud.deth[1] = cpu_to_be32((txp->entropy << HFI1_IPOIB_ENTROPY_SHIFT) | sqpn); /* Construct the pbc. */ sdma_hdr->pbc = cpu_to_le64(create_pbc(ppd, ib_is_sc5(txp->flow.sc5) << PBC_DC_INFO_SHIFT, 0, sc_to_vlt(priv->dd, txp->flow.sc5), dwords - SIZE_OF_CRC + (sizeof(sdma_hdr->pbc) >> 2))); } static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev, struct sk_buff *skb, struct ipoib_txparms *txp) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); struct hfi1_ipoib_txq *txq = txp->txq; struct ipoib_txreq *tx; struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; u32 tail = tx_ring->tail; int ret; if (unlikely(!tx_ring->avail)) { u32 head; if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq)) /* This shouldn't happen with a stopped queue */ return ERR_PTR(-ENOMEM); /* See hfi1_ipoib_poll_tx_ring() */ head = smp_load_acquire(&tx_ring->head); tx_ring->avail = min_t(u32, hfi1_ipoib_ring_hwat(txq), CIRC_CNT(head, tail, tx_ring->max_items)); } else { tx_ring->avail--; } tx = hfi1_txreq_from_idx(tx_ring, tail); trace_hfi1_txq_alloc_tx(txq); /* so that we can test if the sdma descriptors are there */ tx->txreq.num_desc = 0; tx->txq = txq; tx->skb = skb; INIT_LIST_HEAD(&tx->txreq.list); hfi1_ipoib_build_ib_tx_headers(tx, txp); ret = hfi1_ipoib_build_tx_desc(tx, txp); if (likely(!ret)) { if (txq->flow.as_int != txp->flow.as_int) { txq->flow.tx_queue = txp->flow.tx_queue; txq->flow.sc5 = txp->flow.sc5; txq->sde = sdma_select_engine_sc(priv->dd, txp->flow.tx_queue, txp->flow.sc5); trace_hfi1_flow_switch(txq); } return tx; } sdma_txclean(priv->dd, &tx->txreq); return ERR_PTR(ret); } static int hfi1_ipoib_submit_tx_list(struct net_device *dev, struct hfi1_ipoib_txq *txq) { int ret; u16 count_out; ret = sdma_send_txlist(txq->sde, iowait_get_ib_work(&txq->wait), &txq->tx_list, &count_out); if (likely(!ret) || ret == -EBUSY || ret == -ECOMM) return ret; dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret); return ret; } static int hfi1_ipoib_flush_tx_list(struct net_device *dev, struct hfi1_ipoib_txq *txq) { int ret = 0; if (!list_empty(&txq->tx_list)) { /* Flush the current list */ ret = hfi1_ipoib_submit_tx_list(dev, txq); if (unlikely(ret)) if (ret != -EBUSY) ++dev->stats.tx_carrier_errors; } return ret; } static int 
hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq, struct ipoib_txreq *tx) { int ret; ret = sdma_send_txreq(txq->sde, iowait_get_ib_work(&txq->wait), &tx->txreq, txq->pkts_sent); if (likely(!ret)) { txq->pkts_sent = true; iowait_starve_clear(txq->pkts_sent, &txq->wait); } return ret; } static int hfi1_ipoib_send_dma_single(struct net_device *dev, struct sk_buff *skb, struct ipoib_txparms *txp) { struct hfi1_ipoib_txq *txq = txp->txq; struct hfi1_ipoib_circ_buf *tx_ring; struct ipoib_txreq *tx; int ret; tx = hfi1_ipoib_send_dma_common(dev, skb, txp); if (IS_ERR(tx)) { int ret = PTR_ERR(tx); dev_kfree_skb_any(skb); if (ret == -ENOMEM) ++dev->stats.tx_errors; else ++dev->stats.tx_carrier_errors; return NETDEV_TX_OK; } tx_ring = &txq->tx_ring; trace_hfi1_tx_consume(tx, tx_ring->tail); /* consume tx */ smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items)); ret = hfi1_ipoib_submit_tx(txq, tx); if (likely(!ret)) { tx_ok: trace_sdma_output_ibhdr(txq->priv->dd, &tx->sdma_hdr->hdr, ib_is_sc5(txp->flow.sc5)); hfi1_ipoib_check_queue_depth(txq); return NETDEV_TX_OK; } txq->pkts_sent = false; if (ret == -EBUSY || ret == -ECOMM) goto tx_ok; /* mark complete and kick napi tx */ smp_store_release(&tx->complete, 1); napi_schedule(&tx->txq->napi); ++dev->stats.tx_carrier_errors; return NETDEV_TX_OK; } static int hfi1_ipoib_send_dma_list(struct net_device *dev, struct sk_buff *skb, struct ipoib_txparms *txp) { struct hfi1_ipoib_txq *txq = txp->txq; struct hfi1_ipoib_circ_buf *tx_ring; struct ipoib_txreq *tx; /* Has the flow change ? */ if (txq->flow.as_int != txp->flow.as_int) { int ret; trace_hfi1_flow_flush(txq); ret = hfi1_ipoib_flush_tx_list(dev, txq); if (unlikely(ret)) { if (ret == -EBUSY) ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } } tx = hfi1_ipoib_send_dma_common(dev, skb, txp); if (IS_ERR(tx)) { int ret = PTR_ERR(tx); dev_kfree_skb_any(skb); if (ret == -ENOMEM) ++dev->stats.tx_errors; else ++dev->stats.tx_carrier_errors; return NETDEV_TX_OK; } tx_ring = &txq->tx_ring; trace_hfi1_tx_consume(tx, tx_ring->tail); /* consume tx */ smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items)); list_add_tail(&tx->txreq.list, &txq->tx_list); hfi1_ipoib_check_queue_depth(txq); trace_sdma_output_ibhdr(txq->priv->dd, &tx->sdma_hdr->hdr, ib_is_sc5(txp->flow.sc5)); if (!netdev_xmit_more()) (void)hfi1_ipoib_flush_tx_list(dev, txq); return NETDEV_TX_OK; } static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb) { if (skb_transport_header_was_set(skb)) { u8 *hdr = (u8 *)skb_transport_header(skb); return (hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3]); } return (u8)skb_get_queue_mapping(skb); } int hfi1_ipoib_send(struct net_device *dev, struct sk_buff *skb, struct ib_ah *address, u32 dqpn) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); struct ipoib_txparms txp; struct rdma_netdev *rn = netdev_priv(dev); if (unlikely(skb->len > rn->mtu + HFI1_IPOIB_ENCAP_LEN)) { dd_dev_warn(priv->dd, "packet len %d (> %d) too long to send, dropping\n", skb->len, rn->mtu + HFI1_IPOIB_ENCAP_LEN); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } txp.dd = priv->dd; txp.ah_attr = &ibah_to_rvtah(address)->attr; txp.ibp = to_iport(priv->device, priv->port_num); txp.txq = &priv->txqs[skb_get_queue_mapping(skb)]; txp.dqpn = dqpn; txp.flow.sc5 = txp.ibp->sl_to_sc[rdma_ah_get_sl(txp.ah_attr)]; txp.flow.tx_queue = (u8)skb_get_queue_mapping(skb); txp.entropy = hfi1_ipoib_calc_entropy(skb); if (netdev_xmit_more() || 
!list_empty(&txp.txq->tx_list)) return hfi1_ipoib_send_dma_list(dev, skb, &txp); return hfi1_ipoib_send_dma_single(dev, skb, &txp); } /* * hfi1_ipoib_sdma_sleep - ipoib sdma sleep function * * This function gets called from sdma_send_txreq() when there are not enough * sdma descriptors available to send the packet. It adds Tx queue's wait * structure to sdma engine's dmawait list to be woken up when descriptors * become available. */ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent) { struct hfi1_ipoib_txq *txq = container_of(wait->iow, struct hfi1_ipoib_txq, wait); write_seqlock(&sde->waitlock); if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) { if (sdma_progress(sde, seq, txreq)) { write_sequnlock(&sde->waitlock); return -EAGAIN; } if (list_empty(&txreq->list)) /* came from non-list submit */ list_add_tail(&txreq->list, &txq->tx_list); if (list_empty(&txq->wait.list)) { struct hfi1_ibport *ibp = &sde->ppd->ibport_data; if (!atomic_xchg(&txq->tx_ring.no_desc, 1)) { trace_hfi1_txq_queued(txq); hfi1_ipoib_stop_txq(txq); } ibp->rvp.n_dmawait++; iowait_queue(pkts_sent, wait->iow, &sde->dmawait); } write_sequnlock(&sde->waitlock); return -EBUSY; } write_sequnlock(&sde->waitlock); return -EINVAL; } /* * hfi1_ipoib_sdma_wakeup - ipoib sdma wakeup function * * This function gets called when SDMA descriptors becomes available and Tx * queue's wait structure was previously added to sdma engine's dmawait list. */ static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason) { struct hfi1_ipoib_txq *txq = container_of(wait, struct hfi1_ipoib_txq, wait); trace_hfi1_txq_wakeup(txq); if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND); } static void hfi1_ipoib_flush_txq(struct work_struct *work) { struct iowait_work *ioww = container_of(work, struct iowait_work, iowork); struct iowait *wait = iowait_ioww_to_iow(ioww); struct hfi1_ipoib_txq *txq = container_of(wait, struct hfi1_ipoib_txq, wait); struct net_device *dev = txq->priv->netdev; if (likely(dev->reg_state == NETREG_REGISTERED) && likely(!hfi1_ipoib_flush_tx_list(dev, txq))) if (atomic_xchg(&txq->tx_ring.no_desc, 0)) hfi1_ipoib_wake_txq(txq); } int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) { struct net_device *dev = priv->netdev; u32 tx_ring_size, tx_item_size; struct hfi1_ipoib_circ_buf *tx_ring; int i, j; /* * Ring holds 1 less than tx_ring_size * Round up to next power of 2 in order to hold at least tx_queue_len */ tx_ring_size = roundup_pow_of_two(dev->tx_queue_len + 1); tx_item_size = roundup_pow_of_two(sizeof(struct ipoib_txreq)); priv->txqs = kcalloc_node(dev->num_tx_queues, sizeof(struct hfi1_ipoib_txq), GFP_KERNEL, priv->dd->node); if (!priv->txqs) return -ENOMEM; for (i = 0; i < dev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; struct ipoib_txreq *tx; tx_ring = &txq->tx_ring; iowait_init(&txq->wait, 0, hfi1_ipoib_flush_txq, NULL, hfi1_ipoib_sdma_sleep, hfi1_ipoib_sdma_wakeup, NULL, NULL); txq->priv = priv; txq->sde = NULL; INIT_LIST_HEAD(&txq->tx_list); atomic_set(&txq->tx_ring.stops, 0); atomic_set(&txq->tx_ring.ring_full, 0); atomic_set(&txq->tx_ring.no_desc, 0); txq->q_idx = i; txq->flow.tx_queue = 0xff; txq->flow.sc5 = 0xff; txq->pkts_sent = false; netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i), priv->dd->node); txq->tx_ring.items = kvzalloc_node(array_size(tx_ring_size, tx_item_size), GFP_KERNEL, priv->dd->node); 
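/*
 * Both tx_ring_size and tx_item_size are rounded up to powers of two
 * above, so ring indices can wrap with a simple "& (size - 1)" mask
 * (see CIRC_ADD) and hfi1_txreq_from_idx() can locate an entry with
 * "idx << shift" instead of a multiply.
 */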
if (!txq->tx_ring.items) goto free_txqs; txq->tx_ring.max_items = tx_ring_size; txq->tx_ring.shift = ilog2(tx_item_size); txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq); tx_ring = &txq->tx_ring; for (j = 0; j < tx_ring_size; j++) { hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr = kzalloc_node(sizeof(*tx->sdma_hdr), GFP_KERNEL, priv->dd->node); if (!hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr) goto free_txqs; } netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring); } return 0; free_txqs: for (i--; i >= 0; i--) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; netif_napi_del(&txq->napi); tx_ring = &txq->tx_ring; for (j = 0; j < tx_ring_size; j++) kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr); kvfree(tx_ring->items); } kfree(priv->txqs); priv->txqs = NULL; return -ENOMEM; } static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) { struct sdma_txreq *txreq; struct sdma_txreq *txreq_tmp; list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) { struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq); list_del(&txreq->list); sdma_txclean(txq->priv->dd, &tx->txreq); dev_kfree_skb_any(tx->skb); tx->skb = NULL; txq->tx_ring.complete_txreqs++; } if (hfi1_ipoib_used(txq)) dd_dev_warn(txq->priv->dd, "txq %d not empty found %u requests\n", txq->q_idx, hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs)); } void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv) { int i, j; for (i = 0; i < priv->netdev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; iowait_cancel_work(&txq->wait); iowait_sdma_drain(&txq->wait); hfi1_ipoib_drain_tx_list(txq); netif_napi_del(&txq->napi); hfi1_ipoib_drain_tx_ring(txq); for (j = 0; j < tx_ring->max_items; j++) kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr); kvfree(tx_ring->items); } kfree(priv->txqs); priv->txqs = NULL; } void hfi1_ipoib_napi_tx_enable(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); int i; for (i = 0; i < dev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; napi_enable(&txq->napi); } } void hfi1_ipoib_napi_tx_disable(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); int i; for (i = 0; i < dev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; napi_disable(&txq->napi); hfi1_ipoib_drain_tx_ring(txq); } } void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); struct hfi1_ipoib_txq *txq = &priv->txqs[q]; dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n", txq, q, __netif_subqueue_stopped(dev, txq->q_idx), atomic_read(&txq->tx_ring.stops), atomic_read(&txq->tx_ring.no_desc), atomic_read(&txq->tx_ring.ring_full)); dd_dev_info(priv->dd, "sde %p engine %u\n", txq->sde, txq->sde ? txq->sde->this_idx : 0); dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int); dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n", txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs, hfi1_ipoib_used(txq)); dd_dev_info(priv->dd, "tx_queue_len %u max_items %u\n", dev->tx_queue_len, txq->tx_ring.max_items); dd_dev_info(priv->dd, "head %u tail %u\n", txq->tx_ring.head, txq->tx_ring.tail); dd_dev_info(priv->dd, "wait queued %u\n", !list_empty(&txq->wait.list)); dd_dev_info(priv->dd, "tx_list empty %u\n", list_empty(&txq->tx_list)); }
linux-master
drivers/infiniband/hw/hfi1/ipoib_tx.c
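The TX ring in ipoib_tx.c above relies on two small arithmetic conventions: indices wrap by masking against a power-of-two ring size (the CIRC_ADD/CIRC_NEXT helpers), and occupancy is simply sent_txreqs - complete_txreqs (hfi1_ipoib_used()), which remains correct across unsigned counter wraparound. Below is a minimal, self-contained sketch of that bookkeeping; struct toy_ring, RING_NEXT() and ring_used() are made-up names for illustration and are not the driver's.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u	/* must be a power of two for the mask trick */
#define RING_NEXT(i)	(((i) + 1) & (RING_SIZE - 1))

/* Toy ring mirroring the sent/complete counters used above. */
struct toy_ring {
	uint32_t tail;		/* producer index */
	uint32_t head;		/* consumer index */
	uint64_t sent;		/* total entries produced */
	uint64_t completed;	/* total entries consumed */
};

static uint64_t ring_used(const struct toy_ring *r)
{
	/* Unsigned subtraction stays correct even after the counters wrap. */
	return r->sent - r->completed;
}

int main(void)
{
	struct toy_ring r = { 0 };
	int i;

	for (i = 0; i < 10; i++) {	/* produce 10 entries */
		r.tail = RING_NEXT(r.tail);
		r.sent++;
	}
	for (i = 0; i < 7; i++) {	/* consume 7 of them */
		r.head = RING_NEXT(r.head);
		r.completed++;
	}
	printf("used %llu, tail %u, head %u\n",
	       (unsigned long long)ring_used(&r), r.tail, r.head);
	/* prints: used 3, tail 2, head 7 */
	return 0;
}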
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2020 - 2023 Cornelis Networks, Inc. * Copyright(c) 2015 - 2018 Intel Corporation. */ #include <linux/mm.h> #include <linux/types.h> #include <linux/device.h> #include <linux/dmapool.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/highmem.h> #include <linux/io.h> #include <linux/uio.h> #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/mmu_context.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/string.h> #include "hfi.h" #include "sdma.h" #include "user_sdma.h" #include "verbs.h" /* for the headers */ #include "common.h" /* for struct hfi1_tid_info */ #include "trace.h" static uint hfi1_sdma_comp_ring_size = 128; module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO); MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128"); static unsigned initial_pkt_count = 8; static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts); static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status); static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq); static void user_sdma_free_request(struct user_sdma_request *req); static int check_header_template(struct user_sdma_request *req, struct hfi1_pkt_header *hdr, u32 lrhlen, u32 datalen); static int set_txreq_header(struct user_sdma_request *req, struct user_sdma_txreq *tx, u32 datalen); static int set_txreq_header_ahg(struct user_sdma_request *req, struct user_sdma_txreq *tx, u32 len); static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, struct hfi1_user_sdma_comp_q *cq, u16 idx, enum hfi1_sdma_comp_state state, int ret); static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags); static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len); static int defer_packet_queue( struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent); static void activate_packet_queue(struct iowait *wait, int reason); static int defer_packet_queue( struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent) { struct hfi1_user_sdma_pkt_q *pq = container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy); write_seqlock(&sde->waitlock); trace_hfi1_usdma_defer(pq, sde, &pq->busy); if (sdma_progress(sde, seq, txreq)) goto eagain; /* * We are assuming that if the list is enqueued somewhere, it * is to the dmawait list since that is the only place where * it is supposed to be enqueued. 
*/ xchg(&pq->state, SDMA_PKT_Q_DEFERRED); if (list_empty(&pq->busy.list)) { pq->busy.lock = &sde->waitlock; iowait_get_priority(&pq->busy); iowait_queue(pkts_sent, &pq->busy, &sde->dmawait); } write_sequnlock(&sde->waitlock); return -EBUSY; eagain: write_sequnlock(&sde->waitlock); return -EAGAIN; } static void activate_packet_queue(struct iowait *wait, int reason) { struct hfi1_user_sdma_pkt_q *pq = container_of(wait, struct hfi1_user_sdma_pkt_q, busy); trace_hfi1_usdma_activate(pq, wait, reason); xchg(&pq->state, SDMA_PKT_Q_ACTIVE); wake_up(&wait->wait_dma); }; int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct hfi1_filedata *fd) { int ret = -ENOMEM; char buf[64]; struct hfi1_devdata *dd; struct hfi1_user_sdma_comp_q *cq; struct hfi1_user_sdma_pkt_q *pq; if (!uctxt || !fd) return -EBADF; if (!hfi1_sdma_comp_ring_size) return -EINVAL; dd = uctxt->dd; pq = kzalloc(sizeof(*pq), GFP_KERNEL); if (!pq) return -ENOMEM; pq->dd = dd; pq->ctxt = uctxt->ctxt; pq->subctxt = fd->subctxt; pq->n_max_reqs = hfi1_sdma_comp_ring_size; atomic_set(&pq->n_reqs, 0); init_waitqueue_head(&pq->wait); atomic_set(&pq->n_locked, 0); iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue, activate_packet_queue, NULL, NULL); pq->reqidx = 0; pq->reqs = kcalloc(hfi1_sdma_comp_ring_size, sizeof(*pq->reqs), GFP_KERNEL); if (!pq->reqs) goto pq_reqs_nomem; pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL); if (!pq->req_in_use) goto pq_reqs_no_in_use; snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt, fd->subctxt); pq->txreq_cache = kmem_cache_create(buf, sizeof(struct user_sdma_txreq), L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL); if (!pq->txreq_cache) { dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n", uctxt->ctxt); goto pq_txreq_nomem; } cq = kzalloc(sizeof(*cq), GFP_KERNEL); if (!cq) goto cq_nomem; cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size)); if (!cq->comps) goto cq_comps_nomem; cq->nentries = hfi1_sdma_comp_ring_size; ret = hfi1_init_system_pinning(pq); if (ret) goto pq_mmu_fail; rcu_assign_pointer(fd->pq, pq); fd->cq = cq; return 0; pq_mmu_fail: vfree(cq->comps); cq_comps_nomem: kfree(cq); cq_nomem: kmem_cache_destroy(pq->txreq_cache); pq_txreq_nomem: bitmap_free(pq->req_in_use); pq_reqs_no_in_use: kfree(pq->reqs); pq_reqs_nomem: kfree(pq); return ret; } static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq) { unsigned long flags; seqlock_t *lock = pq->busy.lock; if (!lock) return; write_seqlock_irqsave(lock, flags); if (!list_empty(&pq->busy.list)) { list_del_init(&pq->busy.list); pq->busy.lock = NULL; } write_sequnlock_irqrestore(lock, flags); } int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, struct hfi1_ctxtdata *uctxt) { struct hfi1_user_sdma_pkt_q *pq; trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt); spin_lock(&fd->pq_rcu_lock); pq = srcu_dereference_check(fd->pq, &fd->pq_srcu, lockdep_is_held(&fd->pq_rcu_lock)); if (pq) { rcu_assign_pointer(fd->pq, NULL); spin_unlock(&fd->pq_rcu_lock); synchronize_srcu(&fd->pq_srcu); /* at this point there can be no more new requests */ iowait_sdma_drain(&pq->busy); /* Wait until all requests have been freed. 
*/ wait_event_interruptible( pq->wait, !atomic_read(&pq->n_reqs)); kfree(pq->reqs); hfi1_free_system_pinning(pq); bitmap_free(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); flush_pq_iowait(pq); kfree(pq); } else { spin_unlock(&fd->pq_rcu_lock); } if (fd->cq) { vfree(fd->cq->comps); kfree(fd->cq); fd->cq = NULL; } return 0; } static u8 dlid_to_selector(u16 dlid) { static u8 mapping[256]; static int initialized; static u8 next; int hash; if (!initialized) { memset(mapping, 0xFF, 256); initialized = 1; } hash = ((dlid >> 8) ^ dlid) & 0xFF; if (mapping[hash] == 0xFF) { mapping[hash] = next; next = (next + 1) & 0x7F; } return mapping[hash]; } /** * hfi1_user_sdma_process_request() - Process and start a user sdma request * @fd: valid file descriptor * @iovec: array of io vectors to process * @dim: overall iovec array size * @count: number of io vector array entries processed */ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, struct iovec *iovec, unsigned long dim, unsigned long *count) { int ret = 0, i; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_user_sdma_pkt_q *pq = srcu_dereference(fd->pq, &fd->pq_srcu); struct hfi1_user_sdma_comp_q *cq = fd->cq; struct hfi1_devdata *dd = pq->dd; unsigned long idx = 0; u8 pcount = initial_pkt_count; struct sdma_req_info info; struct user_sdma_request *req; u8 opcode, sc, vl; u16 pkey; u32 slid; u16 dlid; u32 selector; if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { hfi1_cdbg( SDMA, "[%u:%u:%u] First vector not big enough for header %lu/%lu", dd->unit, uctxt->ctxt, fd->subctxt, iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr)); return -EINVAL; } ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info)); if (ret) { hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)", dd->unit, uctxt->ctxt, fd->subctxt, ret); return -EFAULT; } trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt, (u16 *)&info); if (info.comp_idx >= hfi1_sdma_comp_ring_size) { hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Invalid comp index", dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx); return -EINVAL; } /* * Sanity check the header io vector count. Need at least 1 vector * (header) and cannot be larger than the actual io vector count. */ if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) { hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Invalid iov count %d, dim %ld", dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx, req_iovcnt(info.ctrl), dim); return -EINVAL; } if (!info.fragsize) { hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Request does not specify fragsize", dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx); return -EINVAL; } /* Try to claim the request. */ if (test_and_set_bit(info.comp_idx, pq->req_in_use)) { hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use", dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx); return -EBADSLT; } /* * All safety checks have been done and this request has been claimed. 
*/ trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt, info.comp_idx); req = pq->reqs + info.comp_idx; req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */ req->data_len = 0; req->pq = pq; req->cq = cq; req->ahg_idx = -1; req->iov_idx = 0; req->sent = 0; req->seqnum = 0; req->seqcomp = 0; req->seqsubmitted = 0; req->tids = NULL; req->has_error = 0; INIT_LIST_HEAD(&req->txps); memcpy(&req->info, &info, sizeof(info)); /* The request is initialized, count it */ atomic_inc(&pq->n_reqs); if (req_opcode(info.ctrl) == EXPECTED) { /* expected must have a TID info and at least one data vector */ if (req->data_iovs < 2) { SDMA_DBG(req, "Not enough vectors for expected request"); ret = -EINVAL; goto free_req; } req->data_iovs--; } if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) { SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs, MAX_VECTORS_PER_REQ); ret = -EINVAL; goto free_req; } /* Copy the header from the user buffer */ ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info), sizeof(req->hdr)); if (ret) { SDMA_DBG(req, "Failed to copy header template (%d)", ret); ret = -EFAULT; goto free_req; } /* If Static rate control is not enabled, sanitize the header. */ if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL)) req->hdr.pbc[2] = 0; /* Validate the opcode. Do not trust packets from user space blindly. */ opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff; if ((opcode & USER_OPCODE_CHECK_MASK) != USER_OPCODE_CHECK_VAL) { SDMA_DBG(req, "Invalid opcode (%d)", opcode); ret = -EINVAL; goto free_req; } /* * Validate the vl. Do not trust packets from user space blindly. * VL comes from PBC, SC comes from LRH, and the VL needs to * match the SC look up. */ vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF; sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) | (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4)); if (vl >= dd->pport->vls_operational || vl != sc_to_vlt(dd, sc)) { SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl); ret = -EINVAL; goto free_req; } /* Checking P_KEY for requests from user-space */ pkey = (u16)be32_to_cpu(req->hdr.bth[0]); slid = be16_to_cpu(req->hdr.lrh[3]); if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) { ret = -EINVAL; goto free_req; } /* * Also should check the BTH.lnh. If it says the next header is GRH then * the RXE parsing will be off and will land in the middle of the KDETH * or miss it entirely. */ if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) { SDMA_DBG(req, "User tried to pass in a GRH"); ret = -EINVAL; goto free_req; } req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]); /* * Calculate the initial TID offset based on the values of * KDETH.OFFSET and KDETH.OM that are passed in. */ req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) * (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ? 
KDETH_OM_LARGE : KDETH_OM_SMALL); trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt, info.comp_idx, req->tidoffset); idx++; /* Save all the IO vector structures */ for (i = 0; i < req->data_iovs; i++) { req->iovs[i].offset = 0; INIT_LIST_HEAD(&req->iovs[i].list); memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(req->iovs[i].iov)); if (req->iovs[i].iov.iov_len == 0) { ret = -EINVAL; goto free_req; } req->data_len += req->iovs[i].iov.iov_len; } trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt, info.comp_idx, req->data_len); if (pcount > req->info.npkts) pcount = req->info.npkts; /* * Copy any TID info * User space will provide the TID info only when the * request type is EXPECTED. This is true even if there is * only one packet in the request and the header is already * setup. The reason for the singular TID case is that the * driver needs to perform safety checks. */ if (req_opcode(req->info.ctrl) == EXPECTED) { u16 ntids = iovec[idx].iov_len / sizeof(*req->tids); u32 *tmp; if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) { ret = -EINVAL; goto free_req; } /* * We have to copy all of the tids because they may vary * in size and, therefore, the TID count might not be * equal to the pkt count. However, there is no way to * tell at this point. */ tmp = memdup_user(iovec[idx].iov_base, ntids * sizeof(*req->tids)); if (IS_ERR(tmp)) { ret = PTR_ERR(tmp); SDMA_DBG(req, "Failed to copy %d TIDs (%d)", ntids, ret); goto free_req; } req->tids = tmp; req->n_tids = ntids; req->tididx = 0; idx++; } dlid = be16_to_cpu(req->hdr.lrh[1]); selector = dlid_to_selector(dlid); selector += uctxt->ctxt + fd->subctxt; req->sde = sdma_select_user_engine(dd, selector, vl); if (!req->sde || !sdma_running(req->sde)) { ret = -ECOMM; goto free_req; } /* We don't need an AHG entry if the request contains only one packet */ if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) req->ahg_idx = sdma_ahg_alloc(req->sde); set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); pq->state = SDMA_PKT_Q_ACTIVE; /* * This is a somewhat blocking send implementation. * The driver will block the caller until all packets of the * request have been submitted to the SDMA engine. However, it * will not wait for send completions. */ while (req->seqsubmitted != req->info.npkts) { ret = user_sdma_send_pkts(req, pcount); if (ret < 0) { int we_ret; if (ret != -EBUSY) goto free_req; we_ret = wait_event_interruptible_timeout( pq->busy.wait_dma, pq->state == SDMA_PKT_Q_ACTIVE, msecs_to_jiffies( SDMA_IOWAIT_TIMEOUT)); trace_hfi1_usdma_we(pq, we_ret); if (we_ret <= 0) flush_pq_iowait(pq); } } *count += idx; return 0; free_req: /* * If the submitted seqsubmitted == npkts, the completion routine * controls the final state. If sequbmitted < npkts, wait for any * outstanding packets to finish before cleaning up. */ if (req->seqsubmitted < req->info.npkts) { if (req->seqsubmitted) wait_event(pq->busy.wait_dma, (req->seqcomp == req->seqsubmitted - 1)); user_sdma_free_request(req); pq_update(pq); set_comp_state(pq, cq, info.comp_idx, ERROR, ret); } return ret; } static inline u32 compute_data_length(struct user_sdma_request *req, struct user_sdma_txreq *tx) { /* * Determine the proper size of the packet data. * The size of the data of the first packet is in the header * template. However, it includes the header and ICRC, which need * to be subtracted. 
* The minimum representable packet data length in a header is 4 bytes, * therefore, when the data length request is less than 4 bytes, there's * only one packet, and the packet data length is equal to that of the * request data length. * The size of the remaining packets is the minimum of the frag * size (MTU) or remaining data in the request. */ u32 len; if (!req->seqnum) { if (req->data_len < sizeof(u32)) len = req->data_len; else len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) - (sizeof(tx->hdr) - 4)); } else if (req_opcode(req->info.ctrl) == EXPECTED) { u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) * PAGE_SIZE; /* * Get the data length based on the remaining space in the * TID pair. */ len = min(tidlen - req->tidoffset, (u32)req->info.fragsize); /* If we've filled up the TID pair, move to the next one. */ if (unlikely(!len) && ++req->tididx < req->n_tids && req->tids[req->tididx]) { tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) * PAGE_SIZE; req->tidoffset = 0; len = min_t(u32, tidlen, req->info.fragsize); } /* * Since the TID pairs map entire pages, make sure that we * are not going to try to send more data that we have * remaining. */ len = min(len, req->data_len - req->sent); } else { len = min(req->data_len - req->sent, (u32)req->info.fragsize); } trace_hfi1_sdma_user_compute_length(req->pq->dd, req->pq->ctxt, req->pq->subctxt, req->info.comp_idx, len); return len; } static inline u32 pad_len(u32 len) { if (len & (sizeof(u32) - 1)) len += sizeof(u32) - (len & (sizeof(u32) - 1)); return len; } static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len) { /* (Size of complete header - size of PBC) + 4B ICRC + data length */ return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len); } static int user_sdma_txadd_ahg(struct user_sdma_request *req, struct user_sdma_txreq *tx, u32 datalen) { int ret; u16 pbclen = le16_to_cpu(req->hdr.pbc[0]); u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen)); struct hfi1_user_sdma_pkt_q *pq = req->pq; /* * Copy the request header into the tx header * because the HW needs a cacheline-aligned * address. * This copy can be optimized out if the hdr * member of user_sdma_request were also * cacheline aligned. */ memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr)); if (PBC2LRH(pbclen) != lrhlen) { pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen); tx->hdr.pbc[0] = cpu_to_le16(pbclen); } ret = check_header_template(req, &tx->hdr, lrhlen, datalen); if (ret) return ret; ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY, sizeof(tx->hdr) + datalen, req->ahg_idx, 0, NULL, 0, user_sdma_txreq_cb); if (ret) return ret; ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr)); if (ret) sdma_txclean(pq->dd, &tx->txreq); return ret; } static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts) { int ret = 0; u16 count; unsigned npkts = 0; struct user_sdma_txreq *tx = NULL; struct hfi1_user_sdma_pkt_q *pq = NULL; struct user_sdma_iovec *iovec = NULL; if (!req->pq) return -EINVAL; pq = req->pq; /* If tx completion has reported an error, we are done. */ if (READ_ONCE(req->has_error)) return -EFAULT; /* * Check if we might have sent the entire request already */ if (unlikely(req->seqnum == req->info.npkts)) { if (!list_empty(&req->txps)) goto dosend; return ret; } if (!maxpkts || maxpkts > req->info.npkts - req->seqnum) maxpkts = req->info.npkts - req->seqnum; while (npkts < maxpkts) { u32 datalen = 0; /* * Check whether any of the completions have come back * with errors. If so, we are not going to process any * more packets from this request. 
*/ if (READ_ONCE(req->has_error)) return -EFAULT; tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL); if (!tx) return -ENOMEM; tx->flags = 0; tx->req = req; INIT_LIST_HEAD(&tx->list); /* * For the last packet set the ACK request * and disable header suppression. */ if (req->seqnum == req->info.npkts - 1) tx->flags |= (TXREQ_FLAGS_REQ_ACK | TXREQ_FLAGS_REQ_DISABLE_SH); /* * Calculate the payload size - this is min of the fragment * (MTU) size or the remaining bytes in the request but only * if we have payload data. */ if (req->data_len) { iovec = &req->iovs[req->iov_idx]; if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { if (++req->iov_idx == req->data_iovs) { ret = -EFAULT; goto free_tx; } iovec = &req->iovs[req->iov_idx]; WARN_ON(iovec->offset); } datalen = compute_data_length(req, tx); /* * Disable header suppression for the payload <= 8DWS. * If there is an uncorrectable error in the receive * data FIFO when the received payload size is less than * or equal to 8DWS then the RxDmaDataFifoRdUncErr is * not reported.There is set RHF.EccErr if the header * is not suppressed. */ if (!datalen) { SDMA_DBG(req, "Request has data but pkt len is 0"); ret = -EFAULT; goto free_tx; } else if (datalen <= 32) { tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH; } } if (req->ahg_idx >= 0) { if (!req->seqnum) { ret = user_sdma_txadd_ahg(req, tx, datalen); if (ret) goto free_tx; } else { int changes; changes = set_txreq_header_ahg(req, tx, datalen); if (changes < 0) { ret = changes; goto free_tx; } } } else { ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) + datalen, user_sdma_txreq_cb); if (ret) goto free_tx; /* * Modify the header for this packet. This only needs * to be done if we are not going to use AHG. Otherwise, * the HW will do it based on the changes we gave it * during sdma_txinit_ahg(). */ ret = set_txreq_header(req, tx, datalen); if (ret) goto free_txreq; } req->koffset += datalen; if (req_opcode(req->info.ctrl) == EXPECTED) req->tidoffset += datalen; req->sent += datalen; while (datalen) { ret = hfi1_add_pages_to_sdma_packet(req, tx, iovec, &datalen); if (ret) goto free_txreq; iovec = &req->iovs[req->iov_idx]; } list_add_tail(&tx->txreq.list, &req->txps); /* * It is important to increment this here as it is used to * generate the BTH.PSN and, therefore, can't be bulk-updated * outside of the loop. */ tx->seqnum = req->seqnum++; npkts++; } dosend: ret = sdma_send_txlist(req->sde, iowait_get_ib_work(&pq->busy), &req->txps, &count); req->seqsubmitted += count; if (req->seqsubmitted == req->info.npkts) { /* * The txreq has already been submitted to the HW queue * so we can free the AHG entry now. Corruption will not * happen due to the sequential manner in which * descriptors are processed. */ if (req->ahg_idx >= 0) sdma_ahg_free(req->sde, req->ahg_idx); } return ret; free_txreq: sdma_txclean(pq->dd, &tx->txreq); free_tx: kmem_cache_free(pq->txreq_cache, tx); return ret; } static int check_header_template(struct user_sdma_request *req, struct hfi1_pkt_header *hdr, u32 lrhlen, u32 datalen) { /* * Perform safety checks for any type of packet: * - transfer size is multiple of 64bytes * - packet length is multiple of 4 bytes * - packet length is not larger than MTU size * * These checks are only done for the first packet of the * transfer since the header is "given" to us by user space. * For the remainder of the packets we compute the values. 
*/ if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 || lrhlen > get_lrh_len(*hdr, req->info.fragsize)) return -EINVAL; if (req_opcode(req->info.ctrl) == EXPECTED) { /* * The header is checked only on the first packet. Furthermore, * we ensure that at least one TID entry is copied when the * request is submitted. Therefore, we don't have to verify that * tididx points to something sane. */ u32 tidval = req->tids[req->tididx], tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE, tididx = EXP_TID_GET(tidval, IDX), tidctrl = EXP_TID_GET(tidval, CTRL), tidoff; __le32 kval = hdr->kdeth.ver_tid_offset; tidoff = KDETH_GET(kval, OFFSET) * (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ? KDETH_OM_LARGE : KDETH_OM_SMALL); /* * Expected receive packets have the following * additional checks: * - offset is not larger than the TID size * - TIDCtrl values match between header and TID array * - TID indexes match between header and TID array */ if ((tidoff + datalen > tidlen) || KDETH_GET(kval, TIDCTRL) != tidctrl || KDETH_GET(kval, TID) != tididx) return -EINVAL; } return 0; } /* * Correctly set the BTH.PSN field based on type of * transfer - eager packets can just increment the PSN but * expected packets encode generation and sequence in the * BTH.PSN field so just incrementing will result in errors. */ static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags) { u32 val = be32_to_cpu(bthpsn), mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull : 0xffffffull), psn = val & mask; if (expct) psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) | ((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK); else psn = psn + frags; return psn & mask; } static int set_txreq_header(struct user_sdma_request *req, struct user_sdma_txreq *tx, u32 datalen) { struct hfi1_user_sdma_pkt_q *pq = req->pq; struct hfi1_pkt_header *hdr = &tx->hdr; u8 omfactor; /* KDETH.OM */ u16 pbclen; int ret; u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen)); /* Copy the header template to the request before modification */ memcpy(hdr, &req->hdr, sizeof(*hdr)); /* * Check if the PBC and LRH length are mismatched. If so * adjust both in the header. */ pbclen = le16_to_cpu(hdr->pbc[0]); if (PBC2LRH(pbclen) != lrhlen) { pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen); hdr->pbc[0] = cpu_to_le16(pbclen); hdr->lrh[2] = cpu_to_be16(lrhlen >> 2); /* * Third packet * This is the first packet in the sequence that has * a "static" size that can be used for the rest of * the packets (besides the last one). */ if (unlikely(req->seqnum == 2)) { /* * From this point on the lengths in both the * PBC and LRH are the same until the last * packet. * Adjust the template so we don't have to update * every packet */ req->hdr.pbc[0] = hdr->pbc[0]; req->hdr.lrh[2] = hdr->lrh[2]; } } /* * We only have to modify the header if this is not the * first packet in the request. Otherwise, we use the * header given to us. */ if (unlikely(!req->seqnum)) { ret = check_header_template(req, hdr, lrhlen, datalen); if (ret) return ret; goto done; } hdr->bth[2] = cpu_to_be32( set_pkt_bth_psn(hdr->bth[2], (req_opcode(req->info.ctrl) == EXPECTED), req->seqnum)); /* Set ACK request on last packet */ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK)) hdr->bth[2] |= cpu_to_be32(1UL << 31); /* Set the new offset */ hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset); /* Expected packets have to fill in the new TID information */ if (req_opcode(req->info.ctrl) == EXPECTED) { tidval = req->tids[req->tididx]; /* * If the offset puts us at the end of the current TID, * advance everything. 
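 * Reaching EXP_TID_GET(tidval, LEN) * PAGE_SIZE means the current TID pair
 * is fully consumed: step to the next TID entry and reset the offset.
 * set_txreq_header_ahg() below repeats the same check.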
*/ if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) * PAGE_SIZE)) { req->tidoffset = 0; /* * Since we don't copy all the TIDs, all at once, * we have to check again. */ if (++req->tididx > req->n_tids - 1 || !req->tids[req->tididx]) { return -EINVAL; } tidval = req->tids[req->tididx]; } omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >= KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT : KDETH_OM_SMALL_SHIFT; /* Set KDETH.TIDCtrl based on value for this TID. */ KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL, EXP_TID_GET(tidval, CTRL)); /* Set KDETH.TID based on value for this TID */ KDETH_SET(hdr->kdeth.ver_tid_offset, TID, EXP_TID_GET(tidval, IDX)); /* Clear KDETH.SH when DISABLE_SH flag is set */ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0); /* * Set the KDETH.OFFSET and KDETH.OM based on size of * transfer. */ trace_hfi1_sdma_user_tid_info( pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx, req->tidoffset, req->tidoffset >> omfactor, omfactor != KDETH_OM_SMALL_SHIFT); KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, req->tidoffset >> omfactor); KDETH_SET(hdr->kdeth.ver_tid_offset, OM, omfactor != KDETH_OM_SMALL_SHIFT); } done: trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx, hdr, tidval); return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr)); } static int set_txreq_header_ahg(struct user_sdma_request *req, struct user_sdma_txreq *tx, u32 datalen) { u32 ahg[AHG_KDETH_ARRAY_SIZE]; int idx = 0; u8 omfactor; /* KDETH.OM */ struct hfi1_user_sdma_pkt_q *pq = req->pq; struct hfi1_pkt_header *hdr = &req->hdr; u16 pbclen = le16_to_cpu(hdr->pbc[0]); u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen)); size_t array_size = ARRAY_SIZE(ahg); if (PBC2LRH(pbclen) != lrhlen) { /* PBC.PbcLengthDWs */ idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12, (__force u16)cpu_to_le16(LRH2PBC(lrhlen))); if (idx < 0) return idx; /* LRH.PktLen (we need the full 16 bits due to byte swap) */ idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16, (__force u16)cpu_to_be16(lrhlen >> 2)); if (idx < 0) return idx; } /* * Do the common updates */ /* BTH.PSN and BTH.A */ val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) & (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff); if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK)) val32 |= 1UL << 31; idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16, (__force u16)cpu_to_be16(val32 >> 16)); if (idx < 0) return idx; idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16, (__force u16)cpu_to_be16(val32 & 0xffff)); if (idx < 0) return idx; /* KDETH.Offset */ idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16, (__force u16)cpu_to_le16(req->koffset & 0xffff)); if (idx < 0) return idx; idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16, (__force u16)cpu_to_le16(req->koffset >> 16)); if (idx < 0) return idx; if (req_opcode(req->info.ctrl) == EXPECTED) { __le16 val; tidval = req->tids[req->tididx]; /* * If the offset puts us at the end of the current TID, * advance everything. */ if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) * PAGE_SIZE)) { req->tidoffset = 0; /* * Since we don't copy all the TIDs, all at once, * we have to check again. */ if (++req->tididx > req->n_tids - 1 || !req->tids[req->tididx]) return -EINVAL; tidval = req->tids[req->tididx]; } omfactor = ((EXP_TID_GET(tidval, LEN) * PAGE_SIZE) >= KDETH_OM_MAX_SIZE) ? 
KDETH_OM_LARGE_SHIFT : KDETH_OM_SMALL_SHIFT; /* KDETH.OM and KDETH.OFFSET (TID) */ idx = ahg_header_set( ahg, idx, array_size, 7, 0, 16, ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 | ((req->tidoffset >> omfactor) & 0x7fff))); if (idx < 0) return idx; /* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */ val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) | (EXP_TID_GET(tidval, IDX) & 0x3ff)); if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) { val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset, INTR) << AHG_KDETH_INTR_SHIFT)); } else { val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ? cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) : cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset, INTR) << AHG_KDETH_INTR_SHIFT)); } idx = ahg_header_set(ahg, idx, array_size, 7, 16, 14, (__force u16)val); if (idx < 0) return idx; } trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx, req->sde->this_idx, req->ahg_idx, ahg, idx, tidval); sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_USE_AHG, datalen, req->ahg_idx, idx, ahg, sizeof(req->hdr), user_sdma_txreq_cb); return idx; } /** * user_sdma_txreq_cb() - SDMA tx request completion callback. * @txreq: valid sdma tx request * @status: success/failure of request * * Called when the SDMA progress state machine gets notification that * the SDMA descriptors for this tx request have been processed by the * DMA engine. Called in interrupt context. * Only do work on completed sequences. */ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) { struct user_sdma_txreq *tx = container_of(txreq, struct user_sdma_txreq, txreq); struct user_sdma_request *req; struct hfi1_user_sdma_pkt_q *pq; struct hfi1_user_sdma_comp_q *cq; enum hfi1_sdma_comp_state state = COMPLETE; if (!tx->req) return; req = tx->req; pq = req->pq; cq = req->cq; if (status != SDMA_TXREQ_S_OK) { SDMA_DBG(req, "SDMA completion with error %d", status); WRITE_ONCE(req->has_error, 1); state = ERROR; } req->seqcomp = tx->seqnum; kmem_cache_free(pq->txreq_cache, tx); /* sequence isn't complete? We are done */ if (req->seqcomp != req->info.npkts - 1) return; user_sdma_free_request(req); set_comp_state(pq, cq, req->info.comp_idx, state, status); pq_update(pq); } static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) { if (atomic_dec_and_test(&pq->n_reqs)) wake_up(&pq->wait); } static void user_sdma_free_request(struct user_sdma_request *req) { if (!list_empty(&req->txps)) { struct sdma_txreq *t, *p; list_for_each_entry_safe(t, p, &req->txps, list) { struct user_sdma_txreq *tx = container_of(t, struct user_sdma_txreq, txreq); list_del_init(&t->list); sdma_txclean(req->pq->dd, t); kmem_cache_free(req->pq->txreq_cache, tx); } } kfree(req->tids); clear_bit(req->info.comp_idx, req->pq->req_in_use); } static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, struct hfi1_user_sdma_comp_q *cq, u16 idx, enum hfi1_sdma_comp_state state, int ret) { if (state == ERROR) cq->comps[idx].errcode = -ret; smp_wmb(); /* make sure errcode is visible first */ cq->comps[idx].status = state; trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt, idx, state, ret); }
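/*
 * Minimal standalone sketch of the length arithmetic used above: pad_len()
 * rounds the payload up to a 4-byte multiple, get_lrh_len() adds the
 * header-minus-PBC plus the 4-byte ICRC, and set_txreq_header() stores the
 * result in DWORDs (lrhlen >> 2) in lrh[2].  This is not taken from the
 * driver; pad4() and HDR_MINUS_PBC_BYTES are illustration-only names, and
 * HDR_MINUS_PBC_BYTES is an assumed stand-in for sizeof(hdr) -
 * sizeof(hdr.pbc) used only to keep the example self-contained.
 */
#include <stdio.h>

#define HDR_MINUS_PBC_BYTES 56u	/* assumption for illustration only */

static unsigned int pad4(unsigned int len)
{
	/* same rounding rule as pad_len(): up to the next multiple of 4 */
	if (len & 3)
		len += 4 - (len & 3);
	return len;
}

int main(void)
{
	unsigned int datalen = 70;	/* arbitrary example payload */
	unsigned int lrhlen = HDR_MINUS_PBC_BYTES + 4 + pad4(datalen);

	/* 70 pads to 72; lrhlen = 56 + 4 + 72 = 132; lrh[2] holds 132 >> 2 = 33 */
	printf("padded %u lrhlen %u lrh[2] %u\n",
	       pad4(datalen), lrhlen, lrhlen >> 2);
	return 0;
}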
linux-master
drivers/infiniband/hw/hfi1/user_sdma.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015-2018 Intel Corporation. */ #include <linux/delay.h> #include "hfi.h" #include "qp.h" #include "trace.h" #define SC(name) SEND_CTXT_##name /* * Send Context functions */ static void sc_wait_for_packet_egress(struct send_context *sc, int pause); /* * Set the CM reset bit and wait for it to clear. Use the provided * sendctrl register. This routine has no locking. */ void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl) { write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK); while (1) { udelay(1); sendctrl = read_csr(dd, SEND_CTRL); if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0) break; } } /* global control of PIO send */ void pio_send_control(struct hfi1_devdata *dd, int op) { u64 reg, mask; unsigned long flags; int write = 1; /* write sendctrl back */ int flush = 0; /* re-read sendctrl to make sure it is flushed */ int i; spin_lock_irqsave(&dd->sendctrl_lock, flags); reg = read_csr(dd, SEND_CTRL); switch (op) { case PSC_GLOBAL_ENABLE: reg |= SEND_CTRL_SEND_ENABLE_SMASK; fallthrough; case PSC_DATA_VL_ENABLE: mask = 0; for (i = 0; i < ARRAY_SIZE(dd->vld); i++) if (!dd->vld[i].mtu) mask |= BIT_ULL(i); /* Disallow sending on VLs not enabled */ mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) << SEND_CTRL_UNSUPPORTED_VL_SHIFT; reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; break; case PSC_GLOBAL_DISABLE: reg &= ~SEND_CTRL_SEND_ENABLE_SMASK; break; case PSC_GLOBAL_VLARB_ENABLE: reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK; break; case PSC_GLOBAL_VLARB_DISABLE: reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK; break; case PSC_CM_RESET: __cm_reset(dd, reg); write = 0; /* CSR already written (and flushed) */ break; case PSC_DATA_VL_DISABLE: reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK; flush = 1; break; default: dd_dev_err(dd, "%s: invalid control %d\n", __func__, op); break; } if (write) { write_csr(dd, SEND_CTRL, reg); if (flush) (void)read_csr(dd, SEND_CTRL); /* flush write */ } spin_unlock_irqrestore(&dd->sendctrl_lock, flags); } /* number of send context memory pools */ #define NUM_SC_POOLS 2 /* Send Context Size (SCS) wildcards */ #define SCS_POOL_0 -1 #define SCS_POOL_1 -2 /* Send Context Count (SCC) wildcards */ #define SCC_PER_VL -1 #define SCC_PER_CPU -2 #define SCC_PER_KRCVQ -3 /* Send Context Size (SCS) constants */ #define SCS_ACK_CREDITS 32 #define SCS_VL15_CREDITS 102 /* 3 pkts of 2048B data + 128B header */ #define PIO_THRESHOLD_CEILING 4096 #define PIO_WAIT_BATCH_SIZE 5 /* default send context sizes */ static struct sc_config_sizes sc_config_sizes[SC_MAX] = { [SC_KERNEL] = { .size = SCS_POOL_0, /* even divide, pool 0 */ .count = SCC_PER_VL }, /* one per NUMA */ [SC_ACK] = { .size = SCS_ACK_CREDITS, .count = SCC_PER_KRCVQ }, [SC_USER] = { .size = SCS_POOL_0, /* even divide, pool 0 */ .count = SCC_PER_CPU }, /* one per CPU */ [SC_VL15] = { .size = SCS_VL15_CREDITS, .count = 1 }, }; /* send context memory pool configuration */ struct mem_pool_config { int centipercent; /* % of memory, in 100ths of 1% */ int absolute_blocks; /* absolute block count */ }; /* default memory pool configuration: 100% in pool 0 */ static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = { /* centi%, abs blocks */ { 10000, -1 }, /* pool 0 */ { 0, -1 }, /* pool 1 */ }; /* memory pool information, used when calculating final sizes */ struct mem_pool_info { int centipercent; /* * 100th of 1% of memory to use, -1 if blocks * already set */ int count; /* count of contexts in the pool */ int blocks; /* block size of the pool */ int size; 
/* context size, in blocks */ }; /* * Convert a pool wildcard to a valid pool index. The wildcards * start at -1 and increase negatively. Map them as: * -1 => 0 * -2 => 1 * etc. * * Return -1 on non-wildcard input, otherwise convert to a pool number. */ static int wildcard_to_pool(int wc) { if (wc >= 0) return -1; /* non-wildcard */ return -wc - 1; } static const char *sc_type_names[SC_MAX] = { "kernel", "ack", "user", "vl15" }; static const char *sc_type_name(int index) { if (index < 0 || index >= SC_MAX) return "unknown"; return sc_type_names[index]; } /* * Read the send context memory pool configuration and send context * size configuration. Replace any wildcards and come up with final * counts and sizes for the send context types. */ int init_sc_pools_and_sizes(struct hfi1_devdata *dd) { struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } }; int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1; int total_contexts = 0; int fixed_blocks; int pool_blocks; int used_blocks; int cp_total; /* centipercent total */ int ab_total; /* absolute block total */ int extra; int i; /* * When SDMA is enabled, kernel context pio packet size is capped by * "piothreshold". Reduce pio buffer allocation for kernel context by * setting it to a fixed size. The allocation allows 3-deep buffering * of the largest pio packets plus up to 128 bytes header, sufficient * to maintain verbs performance. * * When SDMA is disabled, keep the default pooling allocation. */ if (HFI1_CAP_IS_KSET(SDMA)) { u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ? piothreshold : PIO_THRESHOLD_CEILING; sc_config_sizes[SC_KERNEL].size = 3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE; } /* * Step 0: * - copy the centipercents/absolute sizes from the pool config * - sanity check these values * - add up centipercents, then later check for full value * - add up absolute blocks, then later check for over-commit */ cp_total = 0; ab_total = 0; for (i = 0; i < NUM_SC_POOLS; i++) { int cp = sc_mem_pool_config[i].centipercent; int ab = sc_mem_pool_config[i].absolute_blocks; /* * A negative value is "unused" or "invalid". 
Both *can* * be valid, but centipercent wins, so check that first */ if (cp >= 0) { /* centipercent valid */ cp_total += cp; } else if (ab >= 0) { /* absolute blocks valid */ ab_total += ab; } else { /* neither valid */ dd_dev_err( dd, "Send context memory pool %d: both the block count and centipercent are invalid\n", i); return -EINVAL; } mem_pool_info[i].centipercent = cp; mem_pool_info[i].blocks = ab; } /* do not use both % and absolute blocks for different pools */ if (cp_total != 0 && ab_total != 0) { dd_dev_err( dd, "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n"); return -EINVAL; } /* if any percentages are present, they must add up to 100% x 100 */ if (cp_total != 0 && cp_total != 10000) { dd_dev_err( dd, "Send context memory pool centipercent is %d, expecting 10000\n", cp_total); return -EINVAL; } /* the absolute pool total cannot be more than the mem total */ if (ab_total > total_blocks) { dd_dev_err( dd, "Send context memory pool absolute block count %d is larger than the memory size %d\n", ab_total, total_blocks); return -EINVAL; } /* * Step 2: * - copy from the context size config * - replace context type wildcard counts with real values * - add up non-memory pool block sizes * - add up memory pool user counts */ fixed_blocks = 0; for (i = 0; i < SC_MAX; i++) { int count = sc_config_sizes[i].count; int size = sc_config_sizes[i].size; int pool; /* * Sanity check count: Either a positive value or * one of the expected wildcards is valid. The positive * value is checked later when we compare against total * memory available. */ if (i == SC_ACK) { count = dd->n_krcv_queues; } else if (i == SC_KERNEL) { count = INIT_SC_PER_VL * num_vls; } else if (count == SCC_PER_CPU) { count = dd->num_rcv_contexts - dd->n_krcv_queues; } else if (count < 0) { dd_dev_err( dd, "%s send context invalid count wildcard %d\n", sc_type_name(i), count); return -EINVAL; } if (total_contexts + count > chip_send_contexts(dd)) count = chip_send_contexts(dd) - total_contexts; total_contexts += count; /* * Sanity check pool: The conversion will return a pool * number or -1 if a fixed (non-negative) value. The fixed * value is checked later when we compare against * total memory available. 
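 * For example, SCS_POOL_0 (-1) maps to pool 0 and SCS_POOL_1 (-2) to pool 1;
 * any other negative wildcard falls outside NUM_SC_POOLS and is rejected
 * below as invalid.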
*/ pool = wildcard_to_pool(size); if (pool == -1) { /* non-wildcard */ fixed_blocks += size * count; } else if (pool < NUM_SC_POOLS) { /* valid wildcard */ mem_pool_info[pool].count += count; } else { /* invalid wildcard */ dd_dev_err( dd, "%s send context invalid pool wildcard %d\n", sc_type_name(i), size); return -EINVAL; } dd->sc_sizes[i].count = count; dd->sc_sizes[i].size = size; } if (fixed_blocks > total_blocks) { dd_dev_err( dd, "Send context fixed block count, %u, larger than total block count %u\n", fixed_blocks, total_blocks); return -EINVAL; } /* step 3: calculate the blocks in the pools, and pool context sizes */ pool_blocks = total_blocks - fixed_blocks; if (ab_total > pool_blocks) { dd_dev_err( dd, "Send context fixed pool sizes, %u, larger than pool block count %u\n", ab_total, pool_blocks); return -EINVAL; } /* subtract off the fixed pool blocks */ pool_blocks -= ab_total; for (i = 0; i < NUM_SC_POOLS; i++) { struct mem_pool_info *pi = &mem_pool_info[i]; /* % beats absolute blocks */ if (pi->centipercent >= 0) pi->blocks = (pool_blocks * pi->centipercent) / 10000; if (pi->blocks == 0 && pi->count != 0) { dd_dev_err( dd, "Send context memory pool %d has %u contexts, but no blocks\n", i, pi->count); return -EINVAL; } if (pi->count == 0) { /* warn about wasted blocks */ if (pi->blocks != 0) dd_dev_err( dd, "Send context memory pool %d has %u blocks, but zero contexts\n", i, pi->blocks); pi->size = 0; } else { pi->size = pi->blocks / pi->count; } } /* step 4: fill in the context type sizes from the pool sizes */ used_blocks = 0; for (i = 0; i < SC_MAX; i++) { if (dd->sc_sizes[i].size < 0) { unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size); WARN_ON_ONCE(pool >= NUM_SC_POOLS); dd->sc_sizes[i].size = mem_pool_info[pool].size; } /* make sure we are not larger than what is allowed by the HW */ #define PIO_MAX_BLOCKS 1024 if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS) dd->sc_sizes[i].size = PIO_MAX_BLOCKS; /* calculate our total usage */ used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count; } extra = total_blocks - used_blocks; if (extra != 0) dd_dev_info(dd, "unused send context blocks: %d\n", extra); return total_contexts; } int init_send_contexts(struct hfi1_devdata *dd) { u16 base; int ret, i, j, context; ret = init_credit_return(dd); if (ret) return ret; dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8), GFP_KERNEL); dd->send_contexts = kcalloc(dd->num_send_contexts, sizeof(struct send_context_info), GFP_KERNEL); if (!dd->send_contexts || !dd->hw_to_sw) { kfree(dd->hw_to_sw); kfree(dd->send_contexts); free_credit_return(dd); return -ENOMEM; } /* hardware context map starts with invalid send context indices */ for (i = 0; i < TXE_NUM_CONTEXTS; i++) dd->hw_to_sw[i] = INVALID_SCI; /* * All send contexts have their credit sizes. Allocate credits * for each context one after another from the global space. */ context = 0; base = 1; for (i = 0; i < SC_MAX; i++) { struct sc_config_sizes *scs = &dd->sc_sizes[i]; for (j = 0; j < scs->count; j++) { struct send_context_info *sci = &dd->send_contexts[context]; sci->type = i; sci->base = base; sci->credits = scs->size; context++; base += scs->size; } } return 0; } /* * Allocate a software index and hardware context of the given type. * * Must be called with dd->sc_lock held. 
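 * Returns 0 on success with *sw_index and *hw_context filled in, or -ENOSPC
 * when no unallocated context of the requested type remains.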
*/ static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index, u32 *hw_context) { struct send_context_info *sci; u32 index; u32 context; for (index = 0, sci = &dd->send_contexts[0]; index < dd->num_send_contexts; index++, sci++) { if (sci->type == type && sci->allocated == 0) { sci->allocated = 1; /* use a 1:1 mapping, but make them non-equal */ context = chip_send_contexts(dd) - index - 1; dd->hw_to_sw[context] = index; *sw_index = index; *hw_context = context; return 0; /* success */ } } dd_dev_err(dd, "Unable to locate a free type %d send context\n", type); return -ENOSPC; } /* * Free the send context given by its software index. * * Must be called with dd->sc_lock held. */ static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context) { struct send_context_info *sci; sci = &dd->send_contexts[sw_index]; if (!sci->allocated) { dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n", __func__, sw_index, hw_context); } sci->allocated = 0; dd->hw_to_sw[hw_context] = INVALID_SCI; } /* return the base context of a context in a group */ static inline u32 group_context(u32 context, u32 group) { return (context >> group) << group; } /* return the size of a group */ static inline u32 group_size(u32 group) { return 1 << group; } /* * Obtain the credit return addresses, kernel virtual and bus, for the * given sc. * * To understand this routine: * o va and dma are arrays of struct credit_return. One for each physical * send context, per NUMA. * o Each send context always looks in its relative location in a struct * credit_return for its credit return. * o Each send context in a group must have its return address CSR programmed * with the same value. Use the address of the first send context in the * group. */ static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma) { u32 gc = group_context(sc->hw_context, sc->group); u32 index = sc->hw_context & 0x7; sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index]; *dma = (unsigned long) &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc]; } /* * Work queue function triggered in error interrupt routine for * kernel contexts. */ static void sc_halted(struct work_struct *work) { struct send_context *sc; sc = container_of(work, struct send_context, halt_work); sc_restart(sc); } /* * Calculate PIO block threshold for this send context using the given MTU. * Trigger a return when one MTU plus optional header of credits remain. * * Parameter mtu is in bytes. * Parameter hdrqentsize is in DWORDs. * * Return value is what to write into the CSR: trigger return when * unreturned credits pass this count. */ u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize) { u32 release_credits; u32 threshold; /* add in the header size, then divide by the PIO block size */ mtu += hdrqentsize << 2; release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE); /* check against this context's credits */ if (sc->credits <= release_credits) threshold = 1; else threshold = sc->credits - release_credits; return threshold; } /* * Calculate credit threshold in terms of percent of the allocated credits. * Trigger when unreturned credits equal or exceed the percentage of the whole. * * Return value is what to write into the CSR: trigger return when * unreturned credits pass this count. */ u32 sc_percent_to_threshold(struct send_context *sc, u32 percent) { return (sc->credits * percent) / 100; } /* * Set the credit return threshold. 
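 * Callers typically derive new_threshold from sc_percent_to_threshold() or
 * sc_mtu_to_threshold() above; any change forces an immediate credit return
 * so the context does not stall on the old threshold.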
*/ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold) { unsigned long flags; u32 old_threshold; int force_return = 0; spin_lock_irqsave(&sc->credit_ctrl_lock, flags); old_threshold = (sc->credit_ctrl >> SC(CREDIT_CTRL_THRESHOLD_SHIFT)) & SC(CREDIT_CTRL_THRESHOLD_MASK); if (new_threshold != old_threshold) { sc->credit_ctrl = (sc->credit_ctrl & ~SC(CREDIT_CTRL_THRESHOLD_SMASK)) | ((new_threshold & SC(CREDIT_CTRL_THRESHOLD_MASK)) << SC(CREDIT_CTRL_THRESHOLD_SHIFT)); write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_CTRL), sc->credit_ctrl); /* force a credit return on change to avoid a possible stall */ force_return = 1; } spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); if (force_return) sc_return_credits(sc); } /* * set_pio_integrity * * Set the CHECK_ENABLE register for the send context 'sc'. */ void set_pio_integrity(struct send_context *sc) { struct hfi1_devdata *dd = sc->dd; u32 hw_context = sc->hw_context; int type = sc->type; write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), hfi1_pkt_default_send_ctxt_mask(dd, type)); } static u32 get_buffers_allocated(struct send_context *sc) { int cpu; u32 ret = 0; for_each_possible_cpu(cpu) ret += *per_cpu_ptr(sc->buffers_allocated, cpu); return ret; } static void reset_buffers_allocated(struct send_context *sc) { int cpu; for_each_possible_cpu(cpu) (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0; } /* * Allocate a NUMA relative send context structure of the given type along * with a HW context. */ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, uint hdrqentsize, int numa) { struct send_context_info *sci; struct send_context *sc = NULL; dma_addr_t dma; unsigned long flags; u64 reg; u32 thresh; u32 sw_index; u32 hw_context; int ret; u8 opval, opmask; /* do not allocate while frozen */ if (dd->flags & HFI1_FROZEN) return NULL; sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa); if (!sc) return NULL; sc->buffers_allocated = alloc_percpu(u32); if (!sc->buffers_allocated) { kfree(sc); dd_dev_err(dd, "Cannot allocate buffers_allocated per cpu counters\n" ); return NULL; } spin_lock_irqsave(&dd->sc_lock, flags); ret = sc_hw_alloc(dd, type, &sw_index, &hw_context); if (ret) { spin_unlock_irqrestore(&dd->sc_lock, flags); free_percpu(sc->buffers_allocated); kfree(sc); return NULL; } sci = &dd->send_contexts[sw_index]; sci->sc = sc; sc->dd = dd; sc->node = numa; sc->type = type; spin_lock_init(&sc->alloc_lock); spin_lock_init(&sc->release_lock); spin_lock_init(&sc->credit_ctrl_lock); seqlock_init(&sc->waitlock); INIT_LIST_HEAD(&sc->piowait); INIT_WORK(&sc->halt_work, sc_halted); init_waitqueue_head(&sc->halt_wait); /* grouping is always single context for now */ sc->group = 0; sc->sw_index = sw_index; sc->hw_context = hw_context; cr_group_addresses(sc, &dma); sc->credits = sci->credits; sc->size = sc->credits * PIO_BLOCK_SIZE; /* PIO Send Memory Address details */ #define PIO_ADDR_CONTEXT_MASK 0xfful #define PIO_ADDR_CONTEXT_SHIFT 16 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK) << PIO_ADDR_CONTEXT_SHIFT); /* set base and credits */ reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK)) << SC(CTRL_CTXT_DEPTH_SHIFT)) | ((sci->base & SC(CTRL_CTXT_BASE_MASK)) << SC(CTRL_CTXT_BASE_SHIFT)); write_kctxt_csr(dd, hw_context, SC(CTRL), reg); set_pio_integrity(sc); /* unmask all errors */ write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1); /* set the default partition key */ write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), (SC(CHECK_PARTITION_KEY_VALUE_MASK) & DEFAULT_PKEY) << 
SC(CHECK_PARTITION_KEY_VALUE_SHIFT)); /* per context type checks */ if (type == SC_USER) { opval = USER_OPCODE_CHECK_VAL; opmask = USER_OPCODE_CHECK_MASK; } else { opval = OPCODE_CHECK_VAL_DISABLED; opmask = OPCODE_CHECK_MASK_DISABLED; } /* set the send context check opcode mask and value */ write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) | ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT))); /* set up credit return */ reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK); write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg); /* * Calculate the initial credit return threshold. * * For Ack contexts, set a threshold for half the credits. * For User contexts use the given percentage. This has been * sanitized on driver start-up. * For Kernel contexts, use the default MTU plus a header * or half the credits, whichever is smaller. This should * work for both the 3-deep buffering allocation and the * pooling allocation. */ if (type == SC_ACK) { thresh = sc_percent_to_threshold(sc, 50); } else if (type == SC_USER) { thresh = sc_percent_to_threshold(sc, user_credit_return_threshold); } else { /* kernel */ thresh = min(sc_percent_to_threshold(sc, 50), sc_mtu_to_threshold(sc, hfi1_max_mtu, hdrqentsize)); } reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT); /* add in early return */ if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN)) reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK); else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */ reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK); /* set up write-through credit_ctrl */ sc->credit_ctrl = reg; write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg); /* User send contexts should not allow sending on VL15 */ if (type == SC_USER) { reg = 1ULL << 15; write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg); } spin_unlock_irqrestore(&dd->sc_lock, flags); /* * Allocate shadow ring to track outstanding PIO buffers _after_ * unlocking. We don't know the size until the lock is held and * we can't allocate while the lock is held. No one is using * the context yet, so allocate it now. * * User contexts do not get a shadow ring. */ if (type != SC_USER) { /* * Size the shadow ring 1 larger than the number of credits * so head == tail can mean empty. 
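 * With exactly 'credits' entries, head == tail could mean either empty or
 * completely full; the extra slot removes that ambiguity.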
*/ sc->sr_size = sci->credits + 1; sc->sr = kcalloc_node(sc->sr_size, sizeof(union pio_shadow_ring), GFP_KERNEL, numa); if (!sc->sr) { sc_free(sc); return NULL; } } hfi1_cdbg(PIO, "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u", sw_index, hw_context, sc_type_name(type), sc->group, sc->credits, sc->credit_ctrl, thresh); return sc; } /* free a per-NUMA send context structure */ void sc_free(struct send_context *sc) { struct hfi1_devdata *dd; unsigned long flags; u32 sw_index; u32 hw_context; if (!sc) return; sc->flags |= SCF_IN_FREE; /* ensure no restarts */ dd = sc->dd; if (!list_empty(&sc->piowait)) dd_dev_err(dd, "piowait list not empty!\n"); sw_index = sc->sw_index; hw_context = sc->hw_context; sc_disable(sc); /* make sure the HW is disabled */ flush_work(&sc->halt_work); spin_lock_irqsave(&dd->sc_lock, flags); dd->send_contexts[sw_index].sc = NULL; /* clear/disable all registers set in sc_alloc */ write_kctxt_csr(dd, hw_context, SC(CTRL), 0); write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0); write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0); write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0); write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0); write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0); write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0); /* release the index and context for re-use */ sc_hw_free(dd, sw_index, hw_context); spin_unlock_irqrestore(&dd->sc_lock, flags); kfree(sc->sr); free_percpu(sc->buffers_allocated); kfree(sc); } /* disable the context */ void sc_disable(struct send_context *sc) { u64 reg; struct pio_buf *pbuf; LIST_HEAD(wake_list); if (!sc) return; /* do all steps, even if already disabled */ spin_lock_irq(&sc->alloc_lock); reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); sc->flags &= ~SCF_ENABLED; sc_wait_for_packet_egress(sc, 1); write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); /* * Flush any waiters. Once the context is disabled, * credit return interrupts are stopped (although there * could be one in-process when the context is disabled). * Wait one microsecond for any lingering interrupts, then * proceed with the flush. */ udelay(1); spin_lock(&sc->release_lock); if (sc->sr) { /* this context has a shadow ring */ while (sc->sr_tail != sc->sr_head) { pbuf = &sc->sr[sc->sr_tail].pbuf; if (pbuf->cb) (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE); sc->sr_tail++; if (sc->sr_tail >= sc->sr_size) sc->sr_tail = 0; } } spin_unlock(&sc->release_lock); write_seqlock(&sc->waitlock); list_splice_init(&sc->piowait, &wake_list); write_sequnlock(&sc->waitlock); while (!list_empty(&wake_list)) { struct iowait *wait; struct rvt_qp *qp; struct hfi1_qp_priv *priv; wait = list_first_entry(&wake_list, struct iowait, list); qp = iowait_to_qp(wait); priv = qp->priv; list_del_init(&priv->s_iowait.list); priv->s_iowait.lock = NULL; hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN); } spin_unlock_irq(&sc->alloc_lock); } /* return SendEgressCtxtStatus.PacketOccupancy */ static u64 packet_occupancy(u64 reg) { return (reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK) >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT; } /* is egress halted on the context? */ static bool egress_halted(u64 reg) { return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK); } /* is the send context halted? 
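 * Reads the per-context STATUS CSR (the HW view); sc_wait_for_packet_egress()
 * checks this alongside the SW SCF_HALTED flag and the egress halt status.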
*/ static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context) { return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) & SC(STATUS_CTXT_HALTED_SMASK)); } /** * sc_wait_for_packet_egress - wait for packet * @sc: valid send context * @pause: wait for credit return * * Wait for packet egress, optionally pause for credit return * * Egress halt and Context halt are not necessarily the same thing, so * check for both. * * NOTE: The context halt bit may not be set immediately. Because of this, * it is necessary to check the SW SFC_HALTED bit (set in the IRQ) and the HW * context bit to determine if the context is halted. */ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) { struct hfi1_devdata *dd = sc->dd; u64 reg = 0; u64 reg_prev; u32 loop = 0; while (1) { reg_prev = reg; reg = read_csr(dd, sc->hw_context * 8 + SEND_EGRESS_CTXT_STATUS); /* done if any halt bits, SW or HW are set */ if (sc->flags & SCF_HALTED || is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) break; reg = packet_occupancy(reg); if (reg == 0) break; /* counter is reset if occupancy count changes */ if (reg != reg_prev) loop = 0; if (loop > 50000) { /* timed out - bounce the link */ dd_dev_err(dd, "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", __func__, sc->sw_index, sc->hw_context, (u32)reg); queue_work(dd->pport->link_wq, &dd->pport->link_bounce_work); break; } loop++; udelay(1); } if (pause) /* Add additional delay to ensure chip returns all credits */ pause_for_credit_return(dd); } void sc_wait(struct hfi1_devdata *dd) { int i; for (i = 0; i < dd->num_send_contexts; i++) { struct send_context *sc = dd->send_contexts[i].sc; if (!sc) continue; sc_wait_for_packet_egress(sc, 0); } } /* * Restart a context after it has been halted due to error. * * If the first step fails - wait for the halt to be asserted, return early. * Otherwise complain about timeouts but keep going. * * It is expected that allocations (enabled flag bit) have been shut off * already (only applies to kernel contexts). */ int sc_restart(struct send_context *sc) { struct hfi1_devdata *dd = sc->dd; u64 reg; u32 loop; int count; /* bounce off if not halted, or being free'd */ if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE)) return -EINVAL; dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, sc->hw_context); /* * Step 1: Wait for the context to actually halt. * * The error interrupt is asynchronous to actually setting halt * on the context. */ loop = 0; while (1) { reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS)); if (reg & SC(STATUS_CTXT_HALTED_SMASK)) break; if (loop > 100) { dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n", __func__, sc->sw_index, sc->hw_context); return -ETIME; } loop++; udelay(1); } /* * Step 2: Ensure no users are still trying to write to PIO. * * For kernel contexts, we have already turned off buffer allocation. * Now wait for the buffer count to go to zero. * * For user contexts, the user handling code has cut off write access * to the context's PIO pages before calling this routine and will * restore write access after this routine returns. */ if (sc->type != SC_USER) { /* kernel context */ loop = 0; while (1) { count = get_buffers_allocated(sc); if (count == 0) break; if (loop > 100) { dd_dev_err(dd, "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n", __func__, sc->sw_index, sc->hw_context, count); } loop++; udelay(1); } } /* * Step 3: Wait for all packets to egress. 
* This is done while disabling the send context * * Step 4: Disable the context * * This is a superset of the halt. After the disable, the * errors can be cleared. */ sc_disable(sc); /* * Step 5: Enable the context * * This enable will clear the halted flag and per-send context * error flags. */ return sc_enable(sc); } /* * PIO freeze processing. To be called after the TXE block is fully frozen. * Go through all frozen send contexts and disable them. The contexts are * already stopped by the freeze. */ void pio_freeze(struct hfi1_devdata *dd) { struct send_context *sc; int i; for (i = 0; i < dd->num_send_contexts; i++) { sc = dd->send_contexts[i].sc; /* * Don't disable unallocated, unfrozen, or user send contexts. * User send contexts will be disabled when the process * calls into the driver to reset its context. */ if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) continue; /* only need to disable, the context is already stopped */ sc_disable(sc); } } /* * Unfreeze PIO for kernel send contexts. The precondition for calling this * is that all PIO send contexts have been disabled and the SPC freeze has * been cleared. Now perform the last step and re-enable each kernel context. * User (PSM) processing will occur when PSM calls into the kernel to * acknowledge the freeze. */ void pio_kernel_unfreeze(struct hfi1_devdata *dd) { struct send_context *sc; int i; for (i = 0; i < dd->num_send_contexts; i++) { sc = dd->send_contexts[i].sc; if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) continue; if (sc->flags & SCF_LINK_DOWN) continue; sc_enable(sc); /* will clear the sc frozen flag */ } } /** * pio_kernel_linkup() - Re-enable send contexts after linkup event * @dd: valid device data * * When the link goes down, the freeze path is taken. However, a link down * event is different from a freeze because if the send context is re-enabled, * whoever is sending data will start sending data again, which will hang * any QP that is sending data. * * The freeze path now looks at the type of event that occurs and takes this * path for a link down event. */ void pio_kernel_linkup(struct hfi1_devdata *dd) { struct send_context *sc; int i; for (i = 0; i < dd->num_send_contexts; i++) { sc = dd->send_contexts[i].sc; if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER) continue; sc_enable(sc); /* will clear the sc link down flag */ } } /* * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear. * Returns: * -ETIMEDOUT - if we wait too long * -EIO - if there was an error */ static int pio_init_wait_progress(struct hfi1_devdata *dd) { u64 reg; int max, count = 0; /* max is the longest possible HW init time / delay */ max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5; while (1) { reg = read_csr(dd, SEND_PIO_INIT_CTXT); if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK)) break; if (count >= max) return -ETIMEDOUT; udelay(5); count++; } return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0; } /* * Reset all of the send contexts to their power-on state. Used * only during manual init - no lock against sc_enable needed.
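 * It waits out any init already in progress, clears a latched init error,
 * then kicks an all-context init and waits for completion via
 * pio_init_wait_progress().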
*/ void pio_reset_all(struct hfi1_devdata *dd) { int ret; /* make sure the init engine is not busy */ ret = pio_init_wait_progress(dd); /* ignore any timeout */ if (ret == -EIO) { /* clear the error */ write_csr(dd, SEND_PIO_ERR_CLEAR, SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK); } /* reset init all */ write_csr(dd, SEND_PIO_INIT_CTXT, SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK); udelay(2); ret = pio_init_wait_progress(dd); if (ret < 0) { dd_dev_err(dd, "PIO send context init %s while initializing all PIO blocks\n", ret == -ETIMEDOUT ? "is stuck" : "had an error"); } } /* enable the context */ int sc_enable(struct send_context *sc) { u64 sc_ctrl, reg, pio; struct hfi1_devdata *dd; unsigned long flags; int ret = 0; if (!sc) return -EINVAL; dd = sc->dd; /* * Obtain the allocator lock to guard against any allocation * attempts (which should not happen prior to context being * enabled). On the release/disable side we don't need to * worry about locking since the releaser will not do anything * if the context accounting values have not changed. */ spin_lock_irqsave(&sc->alloc_lock, flags); sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK))) goto unlock; /* already enabled */ /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */ *sc->hw_free = 0; sc->free = 0; sc->alloc_free = 0; sc->fill = 0; sc->fill_wrap = 0; sc->sr_head = 0; sc->sr_tail = 0; sc->flags = 0; /* the alloc lock insures no fast path allocation */ reset_buffers_allocated(sc); /* * Clear all per-context errors. Some of these will be set when * we are re-enabling after a context halt. Now that the context * is disabled, the halt will not clear until after the PIO init * engine runs below. */ reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS)); if (reg) write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg); /* * The HW PIO initialization engine can handle only one init * request at a time. Serialize access to each device's engine. */ spin_lock(&dd->sc_init_lock); /* * Since access to this code block is serialized and * each access waits for the initialization to complete * before releasing the lock, the PIO initialization engine * should not be in use, so we don't have to wait for the * InProgress bit to go down. */ pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) << SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) | SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK; write_csr(dd, SEND_PIO_INIT_CTXT, pio); /* * Wait until the engine is done. Give the chip the required time * so, hopefully, we read the register just once. */ udelay(2); ret = pio_init_wait_progress(dd); spin_unlock(&dd->sc_init_lock); if (ret) { dd_dev_err(dd, "sctxt%u(%u): Context not enabled due to init failure %d\n", sc->sw_index, sc->hw_context, ret); goto unlock; } /* * All is well. Enable the context. */ sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK); write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl); /* * Read SendCtxtCtrl to force the write out and prevent a timing * hazard where a PIO write may reach the context before the enable. 
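 * The read-back flushes the posted CSR write, the same pattern used with
 * read_csr() in pio_send_control().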
*/ read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); sc->flags |= SCF_ENABLED; unlock: spin_unlock_irqrestore(&sc->alloc_lock, flags); return ret; } /* force a credit return on the context */ void sc_return_credits(struct send_context *sc) { if (!sc) return; /* a 0->1 transition schedules a credit return */ write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), SC(CREDIT_FORCE_FORCE_RETURN_SMASK)); /* * Ensure that the write is flushed and the credit return is * scheduled. We care more about the 0 -> 1 transition. */ read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE)); /* set back to 0 for next time */ write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0); } /* allow all in-flight packets to drain on the context */ void sc_flush(struct send_context *sc) { if (!sc) return; sc_wait_for_packet_egress(sc, 1); } /* drop all packets on the context, no waiting until they are sent */ void sc_drop(struct send_context *sc) { if (!sc) return; dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", __func__, sc->sw_index, sc->hw_context); } /* * Start the software reaction to a context halt or SPC freeze: * - mark the context as halted or frozen * - stop buffer allocations * * Called from the error interrupt. Other work is deferred until * out of the interrupt. */ void sc_stop(struct send_context *sc, int flag) { unsigned long flags; /* stop buffer allocations */ spin_lock_irqsave(&sc->alloc_lock, flags); /* mark the context */ sc->flags |= flag; sc->flags &= ~SCF_ENABLED; spin_unlock_irqrestore(&sc->alloc_lock, flags); wake_up(&sc->halt_wait); } #define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32)) #define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS) /* * The send context buffer "allocator". * * @sc: the PIO send context we are allocating from * @len: length of whole packet - including PBC - in dwords * @cb: optional callback to call when the buffer is finished sending * @arg: argument for cb * * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM * when link is down. */ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len, pio_release_cb cb, void *arg) { struct pio_buf *pbuf = NULL; unsigned long flags; unsigned long avail; unsigned long blocks = dwords_to_blocks(dw_len); u32 fill_wrap; int trycount = 0; u32 head, next; spin_lock_irqsave(&sc->alloc_lock, flags); if (!(sc->flags & SCF_ENABLED)) { spin_unlock_irqrestore(&sc->alloc_lock, flags); return ERR_PTR(-ECOMM); } retry: avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free); if (blocks > avail) { /* not enough room */ if (unlikely(trycount)) { /* already tried to get more room */ spin_unlock_irqrestore(&sc->alloc_lock, flags); goto done; } /* copy from receiver cache line and recalculate */ sc->alloc_free = READ_ONCE(sc->free); avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free); if (blocks > avail) { /* still no room, actively update */ sc_release_update(sc); sc->alloc_free = READ_ONCE(sc->free); trycount++; goto retry; } } /* there is enough room */ preempt_disable(); this_cpu_inc(*sc->buffers_allocated); /* read this once */ head = sc->sr_head; /* "allocate" the buffer */ sc->fill += blocks; fill_wrap = sc->fill_wrap; sc->fill_wrap += blocks; if (sc->fill_wrap >= sc->credits) sc->fill_wrap = sc->fill_wrap - sc->credits; /* * Fill the parts that the releaser looks at before moving the head. * The only necessary piece is the sent_at field. The credits * we have just allocated cannot have been returned yet, so the * cb and arg will not be looked at for a "while". 
Put them * on this side of the memory barrier anyway. */ pbuf = &sc->sr[head].pbuf; pbuf->sent_at = sc->fill; pbuf->cb = cb; pbuf->arg = arg; pbuf->sc = sc; /* could be filled in at sc->sr init time */ /* make sure this is in memory before updating the head */ /* calculate next head index, do not store */ next = head + 1; if (next >= sc->sr_size) next = 0; /* * update the head - must be last! - the releaser can look at fields * in pbuf once we move the head */ smp_wmb(); sc->sr_head = next; spin_unlock_irqrestore(&sc->alloc_lock, flags); /* finish filling in the buffer outside the lock */ pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE; pbuf->end = sc->base_addr + sc->size; pbuf->qw_written = 0; pbuf->carry_bytes = 0; pbuf->carry.val64 = 0; done: return pbuf; } /* * There are at least two entities that can turn on credit return * interrupts and they can overlap. Avoid problems by implementing * a count scheme that is enforced by a lock. The lock is needed because * the count and CSR write must be paired. */ /* * Start credit return interrupts. This is managed by a count. If already * on, just increment the count. */ void sc_add_credit_return_intr(struct send_context *sc) { unsigned long flags; /* lock must surround both the count change and the CSR update */ spin_lock_irqsave(&sc->credit_ctrl_lock, flags); if (sc->credit_intr_count == 0) { sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK); write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_CTRL), sc->credit_ctrl); } sc->credit_intr_count++; spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); } /* * Stop credit return interrupts. This is managed by a count. Decrement the * count, if the last user, then turn the credit interrupts off. */ void sc_del_credit_return_intr(struct send_context *sc) { unsigned long flags; WARN_ON(sc->credit_intr_count == 0); /* lock must surround both the count change and the CSR update */ spin_lock_irqsave(&sc->credit_ctrl_lock, flags); sc->credit_intr_count--; if (sc->credit_intr_count == 0) { sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK); write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_CTRL), sc->credit_ctrl); } spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); } /* * The caller must be careful when calling this. All needint calls * must be paired with !needint. */ void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint) { if (needint) sc_add_credit_return_intr(sc); else sc_del_credit_return_intr(sc); trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl); if (needint) sc_return_credits(sc); } /** * sc_piobufavail - callback when a PIO buffer is available * @sc: the send context * * This is called from the interrupt handler when a PIO buffer is * available after hfi1_verbs_send() returned an error that no buffers were * available. Disable the interrupt if there are no more QPs waiting. */ static void sc_piobufavail(struct send_context *sc) { struct hfi1_devdata *dd = sc->dd; struct list_head *list; struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE]; struct rvt_qp *qp; struct hfi1_qp_priv *priv; unsigned long flags; uint i, n = 0, top_idx = 0; if (dd->send_contexts[sc->sw_index].type != SC_KERNEL && dd->send_contexts[sc->sw_index].type != SC_VL15) return; list = &sc->piowait; /* * Note: checking that the piowait list is empty and clearing * the buffer available interrupt needs to be atomic or we * could end up with QPs on the wait list with the interrupt * disabled. 
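 * The waitlock seqlock taken below provides that atomicity; sc_disable()
 * acquires the same lock before splicing waiters off piowait.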
*/ write_seqlock_irqsave(&sc->waitlock, flags); while (!list_empty(list)) { struct iowait *wait; if (n == ARRAY_SIZE(qps)) break; wait = list_first_entry(list, struct iowait, list); iowait_get_priority(wait); qp = iowait_to_qp(wait); priv = qp->priv; list_del_init(&priv->s_iowait.list); priv->s_iowait.lock = NULL; if (n) { priv = qps[top_idx]->priv; top_idx = iowait_priority_update_top(wait, &priv->s_iowait, n, top_idx); } /* refcount held until actual wake up */ qps[n++] = qp; } /* * If there had been waiters and there are more * insure that we redo the force to avoid a potential hang. */ if (n) { hfi1_sc_wantpiobuf_intr(sc, 0); if (!list_empty(list)) hfi1_sc_wantpiobuf_intr(sc, 1); } write_sequnlock_irqrestore(&sc->waitlock, flags); /* Wake up the top-priority one first */ if (n) hfi1_qp_wakeup(qps[top_idx], RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN); for (i = 0; i < n; i++) if (i != top_idx) hfi1_qp_wakeup(qps[i], RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN); } /* translate a send credit update to a bit code of reasons */ static inline int fill_code(u64 hw_free) { int code = 0; if (hw_free & CR_STATUS_SMASK) code |= PRC_STATUS_ERR; if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK) code |= PRC_PBC; if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK) code |= PRC_THRESHOLD; if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK) code |= PRC_FILL_ERR; if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK) code |= PRC_SC_DISABLE; return code; } /* use the jiffies compare to get the wrap right */ #define sent_before(a, b) time_before(a, b) /* a < b */ /* * The send context buffer "releaser". */ void sc_release_update(struct send_context *sc) { struct pio_buf *pbuf; u64 hw_free; u32 head, tail; unsigned long old_free; unsigned long free; unsigned long extra; unsigned long flags; int code; if (!sc) return; spin_lock_irqsave(&sc->release_lock, flags); /* update free */ hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */ old_free = sc->free; extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT) - (old_free & CR_COUNTER_MASK)) & CR_COUNTER_MASK; free = old_free + extra; trace_hfi1_piofree(sc, extra); /* call sent buffer callbacks */ code = -1; /* code not yet set */ head = READ_ONCE(sc->sr_head); /* snapshot the head */ tail = sc->sr_tail; while (head != tail) { pbuf = &sc->sr[tail].pbuf; if (sent_before(free, pbuf->sent_at)) { /* not sent yet */ break; } if (pbuf->cb) { if (code < 0) /* fill in code on first user */ code = fill_code(hw_free); (*pbuf->cb)(pbuf->arg, code); } tail++; if (tail >= sc->sr_size) tail = 0; } sc->sr_tail = tail; /* make sure tail is updated before free */ smp_wmb(); sc->free = free; spin_unlock_irqrestore(&sc->release_lock, flags); sc_piobufavail(sc); } /* * Send context group releaser. Argument is the send context that caused * the interrupt. Called from the send context interrupt handler. * * Call release on all contexts in the group. * * This routine takes the sc_lock without an irqsave because it is only * called from an interrupt handler. Adjust if that changes. 
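 * The hardware context is mapped back to a software index through
 * dd->hw_to_sw[], and sc_release_update() is called for each context in the
 * group spanned by group_context() and group_size().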
*/ void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context) { struct send_context *sc; u32 sw_index; u32 gc, gc_end; spin_lock(&dd->sc_lock); sw_index = dd->hw_to_sw[hw_context]; if (unlikely(sw_index >= dd->num_send_contexts)) { dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n", __func__, hw_context, sw_index); goto done; } sc = dd->send_contexts[sw_index].sc; if (unlikely(!sc)) goto done; gc = group_context(hw_context, sc->group); gc_end = gc + group_size(sc->group); for (; gc < gc_end; gc++) { sw_index = dd->hw_to_sw[gc]; if (unlikely(sw_index >= dd->num_send_contexts)) { dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n", __func__, hw_context, sw_index); continue; } sc_release_update(dd->send_contexts[sw_index].sc); } done: spin_unlock(&dd->sc_lock); } /* * pio_select_send_context_vl() - select send context * @dd: devdata * @selector: a spreading factor * @vl: this vl * * This function returns a send context based on the selector and a vl. * The mapping fields are protected by RCU */ struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd, u32 selector, u8 vl) { struct pio_vl_map *m; struct pio_map_elem *e; struct send_context *rval; /* * NOTE This should only happen if SC->VL changed after the initial * checks on the QP/AH * Default will return VL0's send context below */ if (unlikely(vl >= num_vls)) { rval = NULL; goto done; } rcu_read_lock(); m = rcu_dereference(dd->pio_map); if (unlikely(!m)) { rcu_read_unlock(); return dd->vld[0].sc; } e = m->map[vl & m->mask]; rval = e->ksc[selector & e->mask]; rcu_read_unlock(); done: rval = !rval ? dd->vld[0].sc : rval; return rval; } /* * pio_select_send_context_sc() - select send context * @dd: devdata * @selector: a spreading factor * @sc5: the 5 bit sc * * This function returns an send context based on the selector and an sc */ struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd, u32 selector, u8 sc5) { u8 vl = sc_to_vlt(dd, sc5); return pio_select_send_context_vl(dd, selector, vl); } /* * Free the indicated map struct */ static void pio_map_free(struct pio_vl_map *m) { int i; for (i = 0; m && i < m->actual_vls; i++) kfree(m->map[i]); kfree(m); } /* * Handle RCU callback */ static void pio_map_rcu_callback(struct rcu_head *list) { struct pio_vl_map *m = container_of(list, struct pio_vl_map, list); pio_map_free(m); } /* * Set credit return threshold for the kernel send context */ static void set_threshold(struct hfi1_devdata *dd, int scontext, int i) { u32 thres; thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], 50), sc_mtu_to_threshold(dd->kernel_send_context[scontext], dd->vld[i].mtu, dd->rcd[0]->rcvhdrqentsize)); sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); } /* * pio_map_init - called when #vls change * @dd: hfi1_devdata * @port: port number * @num_vls: number of vls * @vl_scontexts: per vl send context mapping (optional) * * This routine changes the mapping based on the number of vls. * * vl_scontexts is used to specify a non-uniform vl/send context * loading. NULL implies auto computing the loading and giving each * VL an uniform distribution of send contexts per VL. * * The auto algorithm computers the sc_per_vl and the number of extra * send contexts. Any extra send contexts are added from the last VL * on down * * rcu locking is used here to control access to the mapping fields. 
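 * Readers take rcu_read_lock() in pio_select_send_context_vl(); this routine
 * publishes the new map with rcu_assign_pointer() under pio_map_lock and
 * frees the old map after a grace period via call_rcu().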
* * If either the num_vls or num_send_contexts are non-power of 2, the * array sizes in the struct pio_vl_map and the struct pio_map_elem are * rounded up to the next highest power of 2 and the first entry is * reused in a round robin fashion. * * If an error occurs the map change is not done and the mapping is not * chaged. * */ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) { int i, j; int extra, sc_per_vl; int scontext = 1; int num_kernel_send_contexts = 0; u8 lvl_scontexts[OPA_MAX_VLS]; struct pio_vl_map *oldmap, *newmap; if (!vl_scontexts) { for (i = 0; i < dd->num_send_contexts; i++) if (dd->send_contexts[i].type == SC_KERNEL) num_kernel_send_contexts++; /* truncate divide */ sc_per_vl = num_kernel_send_contexts / num_vls; /* extras */ extra = num_kernel_send_contexts % num_vls; vl_scontexts = lvl_scontexts; /* add extras from last vl down */ for (i = num_vls - 1; i >= 0; i--, extra--) vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0); } /* build new map */ newmap = kzalloc(struct_size(newmap, map, roundup_pow_of_two(num_vls)), GFP_KERNEL); if (!newmap) goto bail; newmap->actual_vls = num_vls; newmap->vls = roundup_pow_of_two(num_vls); newmap->mask = (1 << ilog2(newmap->vls)) - 1; for (i = 0; i < newmap->vls; i++) { /* save for wrap around */ int first_scontext = scontext; if (i < newmap->actual_vls) { int sz = roundup_pow_of_two(vl_scontexts[i]); /* only allocate once */ newmap->map[i] = kzalloc(struct_size(newmap->map[i], ksc, sz), GFP_KERNEL); if (!newmap->map[i]) goto bail; newmap->map[i]->mask = (1 << ilog2(sz)) - 1; /* * assign send contexts and * adjust credit return threshold */ for (j = 0; j < sz; j++) { if (dd->kernel_send_context[scontext]) { newmap->map[i]->ksc[j] = dd->kernel_send_context[scontext]; set_threshold(dd, scontext, i); } if (++scontext >= first_scontext + vl_scontexts[i]) /* wrap back to first send context */ scontext = first_scontext; } } else { /* just re-use entry without allocating */ newmap->map[i] = newmap->map[i % num_vls]; } scontext = first_scontext + vl_scontexts[i]; } /* newmap in hand, save old map */ spin_lock_irq(&dd->pio_map_lock); oldmap = rcu_dereference_protected(dd->pio_map, lockdep_is_held(&dd->pio_map_lock)); /* publish newmap */ rcu_assign_pointer(dd->pio_map, newmap); spin_unlock_irq(&dd->pio_map_lock); /* success, free any old map after grace period */ if (oldmap) call_rcu(&oldmap->list, pio_map_rcu_callback); return 0; bail: /* free any partial allocation */ pio_map_free(newmap); return -ENOMEM; } void free_pio_map(struct hfi1_devdata *dd) { /* Free PIO map if allocated */ if (rcu_access_pointer(dd->pio_map)) { spin_lock_irq(&dd->pio_map_lock); pio_map_free(rcu_access_pointer(dd->pio_map)); RCU_INIT_POINTER(dd->pio_map, NULL); spin_unlock_irq(&dd->pio_map_lock); synchronize_rcu(); } kfree(dd->kernel_send_context); dd->kernel_send_context = NULL; } int init_pervl_scs(struct hfi1_devdata *dd) { int i; u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */ u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */ u32 ctxt; struct hfi1_pportdata *ppd = dd->pport; dd->vld[15].sc = sc_alloc(dd, SC_VL15, dd->rcd[0]->rcvhdrqentsize, dd->node); if (!dd->vld[15].sc) return -ENOMEM; hfi1_init_ctxt(dd->vld[15].sc); dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048); dd->kernel_send_context = kcalloc_node(dd->num_send_contexts, sizeof(struct send_context *), GFP_KERNEL, dd->node); if (!dd->kernel_send_context) goto freesc15; dd->kernel_send_context[0] = dd->vld[15].sc; for (i = 0; i < num_vls; i++) { /* * Since this function does 
not deal with a specific * receive context but we need the RcvHdrQ entry size, * use the size from rcd[0]. It is guaranteed to be * valid at this point and will remain the same for all * receive contexts. */ dd->vld[i].sc = sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); if (!dd->vld[i].sc) goto nomem; dd->kernel_send_context[i + 1] = dd->vld[i].sc; hfi1_init_ctxt(dd->vld[i].sc); /* non VL15 start with the max MTU */ dd->vld[i].mtu = hfi1_max_mtu; } for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) { dd->kernel_send_context[i + 1] = sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); if (!dd->kernel_send_context[i + 1]) goto nomem; hfi1_init_ctxt(dd->kernel_send_context[i + 1]); } sc_enable(dd->vld[15].sc); ctxt = dd->vld[15].sc->hw_context; mask = all_vl_mask & ~(1LL << 15); write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); dd_dev_info(dd, "Using send context %u(%u) for VL15\n", dd->vld[15].sc->sw_index, ctxt); for (i = 0; i < num_vls; i++) { sc_enable(dd->vld[i].sc); ctxt = dd->vld[i].sc->hw_context; mask = all_vl_mask & ~(data_vls_mask); write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); } for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) { sc_enable(dd->kernel_send_context[i + 1]); ctxt = dd->kernel_send_context[i + 1]->hw_context; mask = all_vl_mask & ~(data_vls_mask); write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); } if (pio_map_init(dd, ppd->port - 1, num_vls, NULL)) goto nomem; return 0; nomem: for (i = 0; i < num_vls; i++) { sc_free(dd->vld[i].sc); dd->vld[i].sc = NULL; } for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) sc_free(dd->kernel_send_context[i + 1]); kfree(dd->kernel_send_context); dd->kernel_send_context = NULL; freesc15: sc_free(dd->vld[15].sc); return -ENOMEM; } int init_credit_return(struct hfi1_devdata *dd) { int ret; int i; dd->cr_base = kcalloc( node_affinity.num_possible_nodes, sizeof(struct credit_return_base), GFP_KERNEL); if (!dd->cr_base) { ret = -ENOMEM; goto done; } for_each_node_with_cpus(i) { int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); set_dev_node(&dd->pcidev->dev, i); dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, bytes, &dd->cr_base[i].dma, GFP_KERNEL); if (!dd->cr_base[i].va) { set_dev_node(&dd->pcidev->dev, dd->node); dd_dev_err(dd, "Unable to allocate credit return DMA range for NUMA %d\n", i); ret = -ENOMEM; goto done; } } set_dev_node(&dd->pcidev->dev, dd->node); ret = 0; done: return ret; } void free_credit_return(struct hfi1_devdata *dd) { int i; if (!dd->cr_base) return; for (i = 0; i < node_affinity.num_possible_nodes; i++) { if (dd->cr_base[i].va) { dma_free_coherent(&dd->pcidev->dev, TXE_NUM_CONTEXTS * sizeof(struct credit_return), dd->cr_base[i].va, dd->cr_base[i].dma); } } kfree(dd->cr_base); dd->cr_base = NULL; } void seqfile_dump_sci(struct seq_file *s, u32 i, struct send_context_info *sci) { struct send_context *sc = sci->sc; u64 reg; seq_printf(s, "SCI %u: type %u base %u credits %u\n", i, sci->type, sci->base, sci->credits); seq_printf(s, " flags 0x%x sw_inx %u hw_ctxt %u grp %u\n", sc->flags, sc->sw_index, sc->hw_context, sc->group); seq_printf(s, " sr_size %u credits %u sr_head %u sr_tail %u\n", sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail); seq_printf(s, " fill %lu free %lu fill_wrap %u alloc_free %lu\n", sc->fill, sc->free, sc->fill_wrap, sc->alloc_free); seq_printf(s, " credit_intr_count %u credit_ctrl 0x%llx\n", sc->credit_intr_count, sc->credit_ctrl); reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS)); seq_printf(s, " *hw_free %llu CurrentFree %llu 
LastReturned %llu\n", (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT, (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) & SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK), reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK)); }
linux-master
drivers/infiniband/hw/hfi1/pio.c
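Illustrative sketch (not part of the pio.c entry above): sc_release_update() folds a narrow, wrapping hardware free-credit counter into a wider monotonic software counter by masking the difference, so a counter wrap never produces a negative delta. The sketch below shows only that arithmetic in a standalone program; the counter width and values are assumptions for the demo, not the CSR layout.

/*
 * Standalone sketch of the credit-counter wrap handling used in
 * sc_release_update(): only the masked delta between the hardware
 * counter and the low bits of the software counter is added, so a
 * hardware wrap is absorbed transparently.
 */
#include <stdio.h>

#define CNT_WIDTH 11                          /* assumed width for the demo */
#define CNT_MASK  ((1u << CNT_WIDTH) - 1)

static unsigned long fold_hw_counter(unsigned long sw_free, unsigned int hw_cnt)
{
	/* delta modulo the counter width; correct even after hw_cnt wrapped */
	unsigned int extra = (hw_cnt - (sw_free & CNT_MASK)) & CNT_MASK;

	return sw_free + extra;
}

int main(void)
{
	unsigned long sw = 2040;  /* software view, wider than the HW counter */
	unsigned int hw = 10;     /* hardware counter already wrapped past 2047 */

	sw = fold_hw_counter(sw, hw);
	printf("software free counter now %lu\n", sw);  /* prints 2058 */
	return 0;
}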
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright(c) 2018 - 2020 Intel Corporation. */ #include "hfi.h" #include "affinity.h" #include "sdma.h" #include "netdev.h" /** * msix_initialize() - Calculate, request and configure MSIx IRQs * @dd: valid hfi1 devdata * */ int msix_initialize(struct hfi1_devdata *dd) { u32 total; int ret; struct hfi1_msix_entry *entries; /* * MSIx interrupt count: * one for the general, "slow path" interrupt * one per used SDMA engine * one per kernel receive context * one for each VNIC context * ...any new IRQs should be added here. */ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts; if (total >= CCE_NUM_MSIX_VECTORS) return -EINVAL; ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX); if (ret < 0) { dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret); return ret; } entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries), GFP_KERNEL); if (!entries) { pci_free_irq_vectors(dd->pcidev); return -ENOMEM; } dd->msix_info.msix_entries = entries; spin_lock_init(&dd->msix_info.msix_lock); bitmap_zero(dd->msix_info.in_use_msix, total); dd->msix_info.max_requested = total; dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total); return 0; } /** * msix_request_irq() - Allocate a free MSIx IRQ * @dd: valid devdata * @arg: context information for the IRQ * @handler: IRQ handler * @thread: IRQ thread handler (could be NULL) * @type: affinty IRQ type * @name: IRQ name * * Allocated an MSIx vector if available, and then create the appropriate * meta data needed to keep track of the pci IRQ request. * * Return: * < 0 Error * >= 0 MSIx vector * */ static int msix_request_irq(struct hfi1_devdata *dd, void *arg, irq_handler_t handler, irq_handler_t thread, enum irq_type type, const char *name) { unsigned long nr; int irq; int ret; struct hfi1_msix_entry *me; /* Allocate an MSIx vector */ spin_lock(&dd->msix_info.msix_lock); nr = find_first_zero_bit(dd->msix_info.in_use_msix, dd->msix_info.max_requested); if (nr < dd->msix_info.max_requested) __set_bit(nr, dd->msix_info.in_use_msix); spin_unlock(&dd->msix_info.msix_lock); if (nr == dd->msix_info.max_requested) return -ENOSPC; if (type < IRQ_SDMA || type >= IRQ_OTHER) return -EINVAL; irq = pci_irq_vector(dd->pcidev, nr); ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name); if (ret) { dd_dev_err(dd, "%s: request for IRQ %d failed, MSIx %lx, err %d\n", name, irq, nr, ret); spin_lock(&dd->msix_info.msix_lock); __clear_bit(nr, dd->msix_info.in_use_msix); spin_unlock(&dd->msix_info.msix_lock); return ret; } /* * assign arg after pci_request_irq call, so it will be * cleaned up */ me = &dd->msix_info.msix_entries[nr]; me->irq = irq; me->arg = arg; me->type = type; /* This is a request, so a failure is not fatal */ ret = hfi1_get_irq_affinity(dd, me); if (ret) dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret); return nr; } static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd, irq_handler_t handler, irq_handler_t thread, const char *name) { int nr = msix_request_irq(rcd->dd, rcd, handler, thread, rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT, name); if (nr < 0) return nr; /* * Set the interrupt register and mask for this * context's interrupt. 
*/ rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64; rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64); rcd->msix_intr = nr; remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr); return 0; } /** * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs * @rcd: valid rcd context * */ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd) { char name[MAX_NAME_SIZE]; snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d", rcd->dd->unit, rcd->ctxt); return msix_request_rcd_irq_common(rcd, receive_context_interrupt, receive_context_thread, name); } /** * msix_netdev_request_rcd_irq - Helper function for RCVAVAIL IRQs * for netdev context * @rcd: valid netdev contexti */ int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd) { char name[MAX_NAME_SIZE]; snprintf(name, sizeof(name), DRIVER_NAME "_%d nd kctxt%d", rcd->dd->unit, rcd->ctxt); return msix_request_rcd_irq_common(rcd, receive_context_interrupt_napi, NULL, name); } /** * msix_request_sdma_irq - Helper for getting SDMA IRQ resources * @sde: valid sdma engine * */ int msix_request_sdma_irq(struct sdma_engine *sde) { int nr; char name[MAX_NAME_SIZE]; snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d", sde->dd->unit, sde->this_idx); nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL, IRQ_SDMA, name); if (nr < 0) return nr; sde->msix_intr = nr; remap_sdma_interrupts(sde->dd, sde->this_idx, nr); return 0; } /** * msix_request_general_irq - Helper for getting general IRQ * resources * @dd: valid device data */ int msix_request_general_irq(struct hfi1_devdata *dd) { int nr; char name[MAX_NAME_SIZE]; snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit); nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL, name); if (nr < 0) return nr; /* general interrupt must be MSIx vector 0 */ if (nr) { msix_free_irq(dd, (u8)nr); dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr); return -EINVAL; } return 0; } /** * enable_sdma_srcs - Helper to enable SDMA IRQ srcs * @dd: valid devdata structure * @i: index of SDMA engine */ static void enable_sdma_srcs(struct hfi1_devdata *dd, int i) { set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true); set_intr_bits(dd, IS_SDMA_PROGRESS_START + i, IS_SDMA_PROGRESS_START + i, true); set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true); set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i, true); } /** * msix_request_irqs() - Allocate all MSIx IRQs * @dd: valid devdata structure * * Helper function to request the used MSIx IRQs. * */ int msix_request_irqs(struct hfi1_devdata *dd) { int i; int ret = msix_request_general_irq(dd); if (ret) return ret; for (i = 0; i < dd->num_sdma; i++) { struct sdma_engine *sde = &dd->per_sdma[i]; ret = msix_request_sdma_irq(sde); if (ret) return ret; enable_sdma_srcs(sde->dd, i); } for (i = 0; i < dd->n_krcv_queues; i++) { struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i); if (rcd) ret = msix_request_rcd_irq(rcd); hfi1_rcd_put(rcd); if (ret) return ret; } return 0; } /** * msix_free_irq() - Free the specified MSIx resources and IRQ * @dd: valid devdata * @msix_intr: MSIx vector to free. 
* */ void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr) { struct hfi1_msix_entry *me; if (msix_intr >= dd->msix_info.max_requested) return; me = &dd->msix_info.msix_entries[msix_intr]; if (!me->arg) /* => no irq, no affinity */ return; hfi1_put_irq_affinity(dd, me); pci_free_irq(dd->pcidev, msix_intr, me->arg); me->arg = NULL; spin_lock(&dd->msix_info.msix_lock); __clear_bit(msix_intr, dd->msix_info.in_use_msix); spin_unlock(&dd->msix_info.msix_lock); } /** * msix_clean_up_interrupts - Free all MSIx IRQ resources * @dd: valid device data data structure * * Free the MSIx and associated PCI resources, if they have been allocated. */ void msix_clean_up_interrupts(struct hfi1_devdata *dd) { int i; struct hfi1_msix_entry *me = dd->msix_info.msix_entries; /* remove irqs - must happen before disabling/turning off */ for (i = 0; i < dd->msix_info.max_requested; i++, me++) msix_free_irq(dd, i); /* clean structures */ kfree(dd->msix_info.msix_entries); dd->msix_info.msix_entries = NULL; dd->msix_info.max_requested = 0; pci_free_irq_vectors(dd->pcidev); } /** * msix_netdev_synchronize_irq - netdev IRQ synchronize * @dd: valid devdata */ void msix_netdev_synchronize_irq(struct hfi1_devdata *dd) { int i; int ctxt_count = hfi1_netdev_ctxt_count(dd); for (i = 0; i < ctxt_count; i++) { struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i); struct hfi1_msix_entry *me; me = &dd->msix_info.msix_entries[rcd->msix_intr]; synchronize_irq(me->irq); } }
linux-master
drivers/infiniband/hw/hfi1/msix.c
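Illustrative sketch (not part of the msix.c entry above): msix_request_irq()/msix_free_irq() manage vectors with a bitmap of in-use slots scanned under a lock, where the first clear bit becomes the vector index and a full bitmap maps to -ENOSPC. The user-space sketch below mirrors only that bookkeeping pattern; the size and helper names are illustrative assumptions.

/*
 * Minimal user-space sketch of the MSI-X vector bookkeeping pattern:
 * find the first clear bit under a lock, set it, and return its index;
 * freeing simply clears the bit again.
 */
#include <pthread.h>
#include <stdio.h>
#include <errno.h>

#define MAX_VECTORS 64

static unsigned long in_use;                  /* one bit per vector */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int vector_alloc(void)
{
	int nr;

	pthread_mutex_lock(&lock);
	for (nr = 0; nr < MAX_VECTORS; nr++) {
		if (!(in_use & (1ul << nr))) {
			in_use |= 1ul << nr;
			break;
		}
	}
	pthread_mutex_unlock(&lock);

	return nr == MAX_VECTORS ? -ENOSPC : nr;  /* same convention as the driver */
}

static void vector_free(int nr)
{
	pthread_mutex_lock(&lock);
	in_use &= ~(1ul << nr);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int a = vector_alloc();
	int b = vector_alloc();

	printf("allocated vectors %d and %d\n", a, b);          /* 0 and 1 */
	vector_free(a);
	printf("next allocation reuses %d\n", vector_alloc());  /* 0 again */
	return 0;
}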
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright(c) 2020 Intel Corporation. * */ /* * This file contains HFI1 support for ipoib functionality */ #include "ipoib.h" #include "hfi.h" static u32 qpn_from_mac(const u8 *mac_arr) { return (u32)mac_arr[1] << 16 | mac_arr[2] << 8 | mac_arr[3]; } static int hfi1_ipoib_dev_init(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); int ret; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; ret = priv->netdev_ops->ndo_init(dev); if (ret) goto out_ret; ret = hfi1_netdev_add_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr), dev); if (ret < 0) { priv->netdev_ops->ndo_uninit(dev); goto out_ret; } return 0; out_ret: free_percpu(dev->tstats); dev->tstats = NULL; return ret; } static void hfi1_ipoib_dev_uninit(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); free_percpu(dev->tstats); dev->tstats = NULL; hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr)); priv->netdev_ops->ndo_uninit(dev); } static int hfi1_ipoib_dev_open(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); int ret; ret = priv->netdev_ops->ndo_open(dev); if (!ret) { struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num); struct rvt_qp *qp; u32 qpn = qpn_from_mac(priv->netdev->dev_addr); rcu_read_lock(); qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn); if (!qp) { rcu_read_unlock(); priv->netdev_ops->ndo_stop(dev); return -EINVAL; } rvt_get_qp(qp); priv->qp = qp; rcu_read_unlock(); hfi1_netdev_enable_queues(priv->dd); hfi1_ipoib_napi_tx_enable(dev); } return ret; } static int hfi1_ipoib_dev_stop(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); if (!priv->qp) return 0; hfi1_ipoib_napi_tx_disable(dev); hfi1_netdev_disable_queues(priv->dd); rvt_put_qp(priv->qp); priv->qp = NULL; return priv->netdev_ops->ndo_stop(dev); } static const struct net_device_ops hfi1_ipoib_netdev_ops = { .ndo_init = hfi1_ipoib_dev_init, .ndo_uninit = hfi1_ipoib_dev_uninit, .ndo_open = hfi1_ipoib_dev_open, .ndo_stop = hfi1_ipoib_dev_stop, .ndo_get_stats64 = dev_get_tstats64, }; static int hfi1_ipoib_mcast_attach(struct net_device *dev, struct ib_device *device, union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr); struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num); struct rvt_qp *qp; int ret = -EINVAL; rcu_read_lock(); qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn); if (qp) { rvt_get_qp(qp); rcu_read_unlock(); if (set_qkey) priv->qkey = qkey; /* attach QP to multicast group */ ret = ib_attach_mcast(&qp->ibqp, mgid, mlid); rvt_put_qp(qp); } else { rcu_read_unlock(); } return ret; } static int hfi1_ipoib_mcast_detach(struct net_device *dev, struct ib_device *device, union ib_gid *mgid, u16 mlid) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr); struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num); struct rvt_qp *qp; int ret = -EINVAL; rcu_read_lock(); qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn); if (qp) { rvt_get_qp(qp); rcu_read_unlock(); ret = ib_detach_mcast(&qp->ibqp, mgid, mlid); rvt_put_qp(qp); } else { rcu_read_unlock(); } return ret; } static void hfi1_ipoib_netdev_dtor(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); 
hfi1_ipoib_txreq_deinit(priv); hfi1_ipoib_rxq_deinit(priv->netdev); free_percpu(dev->tstats); dev->tstats = NULL; } static void hfi1_ipoib_set_id(struct net_device *dev, int id) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); priv->pkey_index = (u16)id; ib_query_pkey(priv->device, priv->port_num, priv->pkey_index, &priv->pkey); } static int hfi1_ipoib_setup_rn(struct ib_device *device, u32 port_num, struct net_device *netdev, void *param) { struct hfi1_devdata *dd = dd_from_ibdev(device); struct rdma_netdev *rn = netdev_priv(netdev); struct hfi1_ipoib_dev_priv *priv; int rc; rn->send = hfi1_ipoib_send; rn->tx_timeout = hfi1_ipoib_tx_timeout; rn->attach_mcast = hfi1_ipoib_mcast_attach; rn->detach_mcast = hfi1_ipoib_mcast_detach; rn->set_id = hfi1_ipoib_set_id; rn->hca = device; rn->port_num = port_num; rn->mtu = netdev->mtu; priv = hfi1_ipoib_priv(netdev); priv->dd = dd; priv->netdev = netdev; priv->device = device; priv->port_num = port_num; priv->netdev_ops = netdev->netdev_ops; ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey); rc = hfi1_ipoib_txreq_init(priv); if (rc) { dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc); return rc; } rc = hfi1_ipoib_rxq_init(netdev); if (rc) { dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc); hfi1_ipoib_txreq_deinit(priv); return rc; } netdev->netdev_ops = &hfi1_ipoib_netdev_ops; netdev->priv_destructor = hfi1_ipoib_netdev_dtor; netdev->needs_free_netdev = true; return 0; } int hfi1_ipoib_rn_get_params(struct ib_device *device, u32 port_num, enum rdma_netdev_t type, struct rdma_netdev_alloc_params *params) { struct hfi1_devdata *dd = dd_from_ibdev(device); if (type != RDMA_NETDEV_IPOIB) return -EOPNOTSUPP; if (!HFI1_CAP_IS_KSET(AIP) || !dd->num_netdev_contexts) return -EOPNOTSUPP; if (!port_num || port_num > dd->num_pports) return -EINVAL; params->sizeof_priv = sizeof(struct hfi1_ipoib_rdma_netdev); params->txqs = dd->num_sdma; params->rxqs = dd->num_netdev_contexts; params->param = NULL; params->initialize_rdma_netdev = hfi1_ipoib_setup_rn; return 0; }
linux-master
drivers/infiniband/hw/hfi1/ipoib_main.c
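Illustrative sketch (not part of the ipoib_main.c entry above): qpn_from_mac() extracts a 24-bit QP number from bytes 1..3 of the IPoIB hardware address. The sketch below shows that extraction together with a packing helper; the packing direction is only for the demo, the driver itself never builds the address this way.

/*
 * Sketch of the MAC <-> QPN mapping assumed by hfi1_ipoib: bytes 1..3
 * of the hardware address carry the 24-bit QP number.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t qpn_from_mac(const uint8_t *mac)
{
	return (uint32_t)mac[1] << 16 | (uint32_t)mac[2] << 8 | mac[3];
}

static void qpn_to_mac(uint32_t qpn, uint8_t *mac)   /* demo-only helper */
{
	mac[1] = (qpn >> 16) & 0xff;
	mac[2] = (qpn >> 8) & 0xff;
	mac[3] = qpn & 0xff;
}

int main(void)
{
	uint8_t mac[6] = { 0 };

	qpn_to_mac(0x123456, mac);                          /* hypothetical QPN */
	printf("recovered qpn 0x%x\n", qpn_from_mac(mac));  /* 0x123456 */
	return 0;
}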
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2018 Intel Corporation. */ #include "hfi.h" #include "verbs_txreq.h" #include "qp.h" /* cut down ridiculously long IB macro names */ #define OP(x) UC_OP(x) /** * hfi1_make_uc_req - construct a request packet (SEND, RDMA write) * @qp: a pointer to the QP * @ps: the current packet state * * Assume s_lock is held. * * Return 1 if constructed; otherwise, return 0. */ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; struct ib_other_headers *ohdr; struct rvt_swqe *wqe; u32 hwords; u32 bth0 = 0; u32 len; u32 pmtu = qp->pmtu; int middle = 0; ps->s_txreq = get_txreq(ps->dev, qp); if (!ps->s_txreq) goto bail_no_tx; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (iowait_sdma_pending(&priv->s_iowait)) { qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } clear_ahg(qp); wqe = rvt_get_swqe_ptr(qp, qp->s_last); rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); goto done_free_tx; } if (priv->hdr_type == HFI1_PKT_TYPE_9B) { /* header size in 32-bit words LRH+BTH = (8+12)/4. */ hwords = 5; if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth; else ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth; } else { /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */ hwords = 7; if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr)))) ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth; else ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth; } /* Get the next send request. */ wqe = rvt_get_swqe_ptr(qp, qp->s_cur); qp->s_wqe = NULL; switch (qp->s_state) { default: if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) goto bail; /* Check if send work queue is empty. */ if (qp->s_cur == READ_ONCE(qp->s_head)) { clear_ahg(qp); goto bail; } /* * Local operations are processed immediately * after all prior requests have completed. */ if (wqe->wr.opcode == IB_WR_REG_MR || wqe->wr.opcode == IB_WR_LOCAL_INV) { int local_ops = 0; int err = 0; if (qp->s_last != qp->s_cur) goto bail; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { err = rvt_invalidate_rkey( qp, wqe->wr.ex.invalidate_rkey); local_ops = 1; } rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR : IB_WC_SUCCESS); if (local_ops) atomic_dec(&qp->local_ops_pending); goto done_free_tx; } /* * Start a new request. 
*/ qp->s_psn = wqe->psn; qp->s_sge.sge = wqe->sg_list[0]; qp->s_sge.sg_list = wqe->sg_list + 1; qp->s_sge.num_sge = wqe->wr.num_sge; qp->s_sge.total_len = wqe->length; len = wqe->length; qp->s_len = len; switch (wqe->wr.opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: if (len > pmtu) { qp->s_state = OP(SEND_FIRST); len = pmtu; break; } if (wqe->wr.opcode == IB_WR_SEND) { qp->s_state = OP(SEND_ONLY); } else { qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; qp->s_wqe = wqe; if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: ohdr->u.rc.reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr); ohdr->u.rc.reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); hwords += sizeof(struct ib_reth) / 4; if (len > pmtu) { qp->s_state = OP(RDMA_WRITE_FIRST); len = pmtu; break; } if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { qp->s_state = OP(RDMA_WRITE_ONLY); } else { qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); /* Immediate data comes after the RETH */ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; hwords += 1; if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; } qp->s_wqe = wqe; if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; break; default: goto bail; } break; case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); fallthrough; case OP(SEND_MIDDLE): len = qp->s_len; if (len > pmtu) { len = pmtu; middle = HFI1_CAP_IS_KSET(SDMA_AHG); break; } if (wqe->wr.opcode == IB_WR_SEND) { qp->s_state = OP(SEND_LAST); } else { qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; qp->s_wqe = wqe; if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; break; case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); fallthrough; case OP(RDMA_WRITE_MIDDLE): len = qp->s_len; if (len > pmtu) { len = pmtu; middle = HFI1_CAP_IS_KSET(SDMA_AHG); break; } if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { qp->s_state = OP(RDMA_WRITE_LAST); } else { qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; } qp->s_wqe = wqe; if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; break; } qp->s_len -= len; ps->s_txreq->hdr_dwords = hwords; ps->s_txreq->sde = priv->s_sde; ps->s_txreq->ss = &qp->s_sge; ps->s_txreq->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), qp->remote_qpn, mask_psn(qp->s_psn++), middle, ps); return 1; done_free_tx: hfi1_put_txreq(ps->s_txreq); ps->s_txreq = NULL; return 1; bail: hfi1_put_txreq(ps->s_txreq); bail_no_tx: ps->s_txreq = NULL; qp->s_flags &= ~RVT_S_BUSY; return 0; } /** * hfi1_uc_rcv - handle an incoming UC packet * @packet: the packet structure * * This is called from qp_rcv() to process an incoming UC packet * for the given QP. * Called at interrupt level. 
*/ void hfi1_uc_rcv(struct hfi1_packet *packet) { struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); void *data = packet->payload; u32 tlen = packet->tlen; struct rvt_qp *qp = packet->qp; struct ib_other_headers *ohdr = packet->ohdr; u32 opcode = packet->opcode; u32 hdrsize = packet->hlen; u32 psn; u32 pad = packet->pad; struct ib_wc wc; u32 pmtu = qp->pmtu; struct ib_reth *reth; int ret; u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2); if (hfi1_ruc_check_hdr(ibp, packet)) return; process_ecn(qp, packet); psn = ib_bth_get_psn(ohdr); /* Compare the PSN verses the expected PSN. */ if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) { /* * Handle a sequence error. * Silently drop any current message. */ qp->r_psn = psn; inv: if (qp->r_state == OP(SEND_FIRST) || qp->r_state == OP(SEND_MIDDLE)) { set_bit(RVT_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; } else { rvt_put_ss(&qp->r_sge); } qp->r_state = OP(SEND_LAST); switch (opcode) { case OP(SEND_FIRST): case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): goto send_first; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_ONLY): case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): goto rdma_first; default: goto drop; } } /* Check for opcode sequence errors. */ switch (qp->r_state) { case OP(SEND_FIRST): case OP(SEND_MIDDLE): if (opcode == OP(SEND_MIDDLE) || opcode == OP(SEND_LAST) || opcode == OP(SEND_LAST_WITH_IMMEDIATE)) break; goto inv; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_MIDDLE): if (opcode == OP(RDMA_WRITE_MIDDLE) || opcode == OP(RDMA_WRITE_LAST) || opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) break; goto inv; default: if (opcode == OP(SEND_FIRST) || opcode == OP(SEND_ONLY) || opcode == OP(SEND_ONLY_WITH_IMMEDIATE) || opcode == OP(RDMA_WRITE_FIRST) || opcode == OP(RDMA_WRITE_ONLY) || opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) break; goto inv; } if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) rvt_comm_est(qp); /* OK, process the packet. */ switch (opcode) { case OP(SEND_FIRST): case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): send_first: if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) { qp->r_sge = qp->s_rdma_read_sge; } else { ret = rvt_get_rwqe(qp, false); if (ret < 0) goto op_err; if (!ret) goto drop; /* * qp->s_rdma_read_sge will be the owner * of the mr references. */ qp->s_rdma_read_sge = qp->r_sge; } qp->r_rcv_len = 0; if (opcode == OP(SEND_ONLY)) goto no_immediate_data; else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) goto send_last_imm; fallthrough; case OP(SEND_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. */ /* * There will be no padding for 9B packet but 16B packets * will come in with some padding since we always add * CRC and LT bytes which will need to be flit aligned */ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes))) goto rewind; qp->r_rcv_len += pmtu; if (unlikely(qp->r_rcv_len > qp->r_len)) goto rewind; rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false); break; case OP(SEND_LAST_WITH_IMMEDIATE): send_last_imm: wc.ex.imm_data = ohdr->u.imm_data; wc.wc_flags = IB_WC_WITH_IMM; goto send_last; case OP(SEND_LAST): no_immediate_data: wc.ex.imm_data = 0; wc.wc_flags = 0; send_last: /* Check for invalid length. */ /* LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + extra_bytes))) goto rewind; /* Don't count the CRC. 
*/ tlen -= (hdrsize + extra_bytes); wc.byte_len = tlen + qp->r_rcv_len; if (unlikely(wc.byte_len > qp->r_len)) goto rewind; wc.opcode = IB_WC_RECV; rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false); rvt_put_ss(&qp->s_rdma_read_sge); last_imm: wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.qp = &qp->ibqp; wc.src_qp = qp->remote_qpn; wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX; /* * It seems that IB mandates the presence of an SL in a * work completion only for the UD transport (see section * 11.4.2 of IBTA Vol. 1). * * However, the way the SL is chosen below is consistent * with the way that IB/qib works and is trying avoid * introducing incompatibilities. * * See also OPA Vol. 1, section 9.7.6, and table 9-17. */ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr); /* zero fields that are N/A */ wc.vendor_err = 0; wc.pkey_index = 0; wc.dlid_path_bits = 0; wc.port_num = 0; /* Signal completion event if the solicited bit is set. */ rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr)); break; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_ONLY): case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */ rdma_first: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) { goto drop; } reth = &ohdr->u.rc.reth; qp->r_len = be32_to_cpu(reth->length); qp->r_rcv_len = 0; qp->r_sge.sg_list = NULL; if (qp->r_len != 0) { u32 rkey = be32_to_cpu(reth->rkey); u64 vaddr = be64_to_cpu(reth->vaddr); int ok; /* Check rkey */ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, rkey, IB_ACCESS_REMOTE_WRITE); if (unlikely(!ok)) goto drop; qp->r_sge.num_sge = 1; } else { qp->r_sge.num_sge = 0; qp->r_sge.sge.mr = NULL; qp->r_sge.sge.vaddr = NULL; qp->r_sge.sge.length = 0; qp->r_sge.sge.sge_length = 0; } if (opcode == OP(RDMA_WRITE_ONLY)) { goto rdma_last; } else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) { wc.ex.imm_data = ohdr->u.rc.imm_data; goto rdma_last_imm; } fallthrough; case OP(RDMA_WRITE_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. */ if (unlikely(tlen != (hdrsize + pmtu + 4))) goto drop; qp->r_rcv_len += pmtu; if (unlikely(qp->r_rcv_len > qp->r_len)) goto drop; rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false); break; case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): wc.ex.imm_data = ohdr->u.imm_data; rdma_last_imm: wc.wc_flags = IB_WC_WITH_IMM; /* Check for invalid length. */ /* LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + pad + 4))) goto drop; /* Don't count the CRC. */ tlen -= (hdrsize + extra_bytes); if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) { rvt_put_ss(&qp->s_rdma_read_sge); } else { ret = rvt_get_rwqe(qp, true); if (ret < 0) goto op_err; if (!ret) goto drop; } wc.byte_len = qp->r_len; wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false); rvt_put_ss(&qp->r_sge); goto last_imm; case OP(RDMA_WRITE_LAST): rdma_last: /* Check for invalid length. */ /* LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + pad + 4))) goto drop; /* Don't count the CRC. */ tlen -= (hdrsize + extra_bytes); if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false); rvt_put_ss(&qp->r_sge); break; default: /* Drop packet for unknown opcodes. */ goto drop; } qp->r_psn++; qp->r_state = opcode; return; rewind: set_bit(RVT_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; drop: ibp->rvp.n_pkt_drops++; return; op_err: rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); }
linux-master
drivers/infiniband/hw/hfi1/uc.c
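Illustrative sketch (not part of the uc.c entry above): hfi1_uc_rcv() sequences packets by 24-bit PSNs via cmp_psn()/mask_psn(), where comparisons must honour wraparound rather than compare raw values. The sketch below is a generic illustration of that idea, assuming one common sign-extension technique; it is not the driver's exact helpers.

/*
 * Generic 24-bit PSN handling sketch: mask to 24 bits, and compare by
 * sign-extending the 24-bit difference so wraparound is handled.
 */
#include <stdio.h>
#include <stdint.h>

#define PSN_MASK 0xffffff

static uint32_t mask_psn(uint32_t psn)
{
	return psn & PSN_MASK;
}

/* < 0: a before b, 0: equal, > 0: a after b (modulo 2^24) */
static int cmp_psn(uint32_t a, uint32_t b)
{
	return ((int32_t)((a - b) << 8)) >> 8;
}

int main(void)
{
	/* 0xfffffe is "before" 0x000001 once the 24-bit wrap is honoured */
	printf("%d\n", cmp_psn(0xfffffe, 0x000001) < 0);  /* 1 */
	printf("0x%x\n", mask_psn(0xfffffe + 5));         /* 0x3 */
	return 0;
}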
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright(c) 2018 - 2020 Intel Corporation. * */ #include "hfi.h" #include "qp.h" #include "rc.h" #include "verbs.h" #include "tid_rdma.h" #include "exp_rcv.h" #include "trace.h" /** * DOC: TID RDMA READ protocol * * This is an end-to-end protocol at the hfi1 level between two nodes that * improves performance by avoiding data copy on the requester side. It * converts a qualified RDMA READ request into a TID RDMA READ request on * the requester side and thereafter handles the request and response * differently. To be qualified, the RDMA READ request should meet the * following: * -- The total data length should be greater than 256K; * -- The total data length should be a multiple of 4K page size; * -- Each local scatter-gather entry should be 4K page aligned; * -- Each local scatter-gather entry should be a multiple of 4K page size; */ #define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK BIT_ULL(32) #define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK BIT_ULL(33) #define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK BIT_ULL(34) #define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK BIT_ULL(35) #define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK BIT_ULL(37) #define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK BIT_ULL(38) /* Maximum number of packets within a flow generation. */ #define MAX_TID_FLOW_PSN BIT(HFI1_KDETH_BTH_SEQ_SHIFT) #define GENERATION_MASK 0xFFFFF static u32 mask_generation(u32 a) { return a & GENERATION_MASK; } /* Reserved generation value to set to unused flows for kernel contexts */ #define KERN_GENERATION_RESERVED mask_generation(U32_MAX) /* * J_KEY for kernel contexts when TID RDMA is used. * See generate_jkey() in hfi.h for more information. */ #define TID_RDMA_JKEY 32 #define HFI1_KERNEL_MIN_JKEY HFI1_ADMIN_JKEY_RANGE #define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1) /* Maximum number of segments in flight per QP request. 
*/ #define TID_RDMA_MAX_READ_SEGS_PER_REQ 6 #define TID_RDMA_MAX_WRITE_SEGS_PER_REQ 4 #define MAX_REQ max_t(u16, TID_RDMA_MAX_READ_SEGS_PER_REQ, \ TID_RDMA_MAX_WRITE_SEGS_PER_REQ) #define MAX_FLOWS roundup_pow_of_two(MAX_REQ + 1) #define MAX_EXPECTED_PAGES (MAX_EXPECTED_BUFFER / PAGE_SIZE) #define TID_RDMA_DESTQP_FLOW_SHIFT 11 #define TID_RDMA_DESTQP_FLOW_MASK 0x1f #define TID_OPFN_QP_CTXT_MASK 0xff #define TID_OPFN_QP_CTXT_SHIFT 56 #define TID_OPFN_QP_KDETH_MASK 0xff #define TID_OPFN_QP_KDETH_SHIFT 48 #define TID_OPFN_MAX_LEN_MASK 0x7ff #define TID_OPFN_MAX_LEN_SHIFT 37 #define TID_OPFN_TIMEOUT_MASK 0x1f #define TID_OPFN_TIMEOUT_SHIFT 32 #define TID_OPFN_RESERVED_MASK 0x3f #define TID_OPFN_RESERVED_SHIFT 26 #define TID_OPFN_URG_MASK 0x1 #define TID_OPFN_URG_SHIFT 25 #define TID_OPFN_VER_MASK 0x7 #define TID_OPFN_VER_SHIFT 22 #define TID_OPFN_JKEY_MASK 0x3f #define TID_OPFN_JKEY_SHIFT 16 #define TID_OPFN_MAX_READ_MASK 0x3f #define TID_OPFN_MAX_READ_SHIFT 10 #define TID_OPFN_MAX_WRITE_MASK 0x3f #define TID_OPFN_MAX_WRITE_SHIFT 4 /* * OPFN TID layout * * 63 47 31 15 * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC * 3210987654321098 7654321098765432 1098765432109876 5432109876543210 * N - the context Number * K - the Kdeth_qp * M - Max_len * T - Timeout * D - reserveD * V - version * U - Urg capable * J - Jkey * R - max_Read * W - max_Write * C - Capcode */ static void tid_rdma_trigger_resume(struct work_struct *work); static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req); static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req, gfp_t gfp); static void hfi1_init_trdma_req(struct rvt_qp *qp, struct tid_rdma_request *req); static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx); static void hfi1_tid_timeout(struct timer_list *t); static void hfi1_add_tid_reap_timer(struct rvt_qp *qp); static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp); static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp); static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp); static void hfi1_tid_retry_timeout(struct timer_list *t); static int make_tid_rdma_ack(struct rvt_qp *qp, struct ib_other_headers *ohdr, struct hfi1_pkt_state *ps); static void hfi1_do_tid_send(struct rvt_qp *qp); static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx); static void tid_rdma_rcv_err(struct hfi1_packet *packet, struct ib_other_headers *ohdr, struct rvt_qp *qp, u32 psn, int diff, bool fecn); static void update_r_next_psn_fecn(struct hfi1_packet *packet, struct hfi1_qp_priv *priv, struct hfi1_ctxtdata *rcd, struct tid_rdma_flow *flow, bool fecn); static void validate_r_tid_ack(struct hfi1_qp_priv *priv) { if (priv->r_tid_ack == HFI1_QP_WQE_INVALID) priv->r_tid_ack = priv->r_tid_tail; } static void tid_rdma_schedule_ack(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; priv->s_flags |= RVT_S_ACK_PENDING; hfi1_schedule_tid_send(qp); } static void tid_rdma_trigger_ack(struct rvt_qp *qp) { validate_r_tid_ack(qp->priv); tid_rdma_schedule_ack(qp); } static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p) { return (((u64)p->qp & TID_OPFN_QP_CTXT_MASK) << TID_OPFN_QP_CTXT_SHIFT) | ((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) << TID_OPFN_QP_KDETH_SHIFT) | (((u64)((p->max_len >> PAGE_SHIFT) - 1) & TID_OPFN_MAX_LEN_MASK) << TID_OPFN_MAX_LEN_SHIFT) | (((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) << TID_OPFN_TIMEOUT_SHIFT) | (((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) | (((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) | 
(((u64)p->max_read & TID_OPFN_MAX_READ_MASK) << TID_OPFN_MAX_READ_SHIFT) | (((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) << TID_OPFN_MAX_WRITE_SHIFT); } static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data) { p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) & TID_OPFN_MAX_LEN_MASK) + 1) << PAGE_SHIFT; p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK; p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) & TID_OPFN_MAX_WRITE_MASK; p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) & TID_OPFN_MAX_READ_MASK; p->qp = ((((data >> TID_OPFN_QP_KDETH_SHIFT) & TID_OPFN_QP_KDETH_MASK) << 16) | ((data >> TID_OPFN_QP_CTXT_SHIFT) & TID_OPFN_QP_CTXT_MASK)); p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK; p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK; } void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p) { struct hfi1_qp_priv *priv = qp->priv; p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt; p->max_len = TID_RDMA_MAX_SEGMENT_SIZE; p->jkey = priv->rcd->jkey; p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ; p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ; p->timeout = qp->timeout; p->urg = is_urg_masked(priv->rcd); } bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data) { struct hfi1_qp_priv *priv = qp->priv; *data = tid_rdma_opfn_encode(&priv->tid_rdma.local); return true; } bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data) { struct hfi1_qp_priv *priv = qp->priv; struct tid_rdma_params *remote, *old; bool ret = true; old = rcu_dereference_protected(priv->tid_rdma.remote, lockdep_is_held(&priv->opfn.lock)); data &= ~0xfULL; /* * If data passed in is zero, return true so as not to continue the * negotiation process */ if (!data || !HFI1_CAP_IS_KSET(TID_RDMA)) goto null; /* * If kzalloc fails, return false. This will result in: * * at the requester a new OPFN request being generated to retry * the negotiation * * at the responder, 0 being returned to the requester so as to * disable TID RDMA at both the requester and the responder */ remote = kzalloc(sizeof(*remote), GFP_ATOMIC); if (!remote) { ret = false; goto null; } tid_rdma_opfn_decode(remote, data); priv->tid_timer_timeout_jiffies = usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) / 1000UL) << 3) * 7); trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local); trace_hfi1_opfn_param(qp, 1, remote); rcu_assign_pointer(priv->tid_rdma.remote, remote); /* * A TID RDMA READ request's segment size is not equal to * remote->max_len only when the request's data length is smaller * than remote->max_len. In that case, there will be only one segment. * Therefore, when priv->pkts_ps is used to calculate req->cur_seg * during retry, it will lead to req->cur_seg = 0, which is exactly * what is expected. */ priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len); priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1; goto free; null: RCU_INIT_POINTER(priv->tid_rdma.remote, NULL); priv->timeout_shift = 0; free: if (old) kfree_rcu(old, rcu_head); return ret; } bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data) { bool ret; ret = tid_rdma_conn_reply(qp, *data); *data = 0; /* * If tid_rdma_conn_reply() returns error, set *data as 0 to indicate * TID RDMA could not be enabled. This will result in TID RDMA being * disabled at the requester too. 
*/ if (ret) (void)tid_rdma_conn_req(qp, data); return ret; } void tid_rdma_conn_error(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct tid_rdma_params *old; old = rcu_dereference_protected(priv->tid_rdma.remote, lockdep_is_held(&priv->opfn.lock)); RCU_INIT_POINTER(priv->tid_rdma.remote, NULL); if (old) kfree_rcu(old, rcu_head); } /* This is called at context initialization time */ int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit) { if (reinit) return 0; BUILD_BUG_ON(TID_RDMA_JKEY < HFI1_KERNEL_MIN_JKEY); BUILD_BUG_ON(TID_RDMA_JKEY > HFI1_KERNEL_MAX_JKEY); rcd->jkey = TID_RDMA_JKEY; hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey); return hfi1_alloc_ctxt_rcv_groups(rcd); } /** * qp_to_rcd - determine the receive context used by a qp * @rdi: rvt dev struct * @qp: the qp * * This routine returns the receive context associated * with a a qp's qpn. * * Returns the context. */ static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi, struct rvt_qp *qp) { struct hfi1_ibdev *verbs_dev = container_of(rdi, struct hfi1_ibdev, rdi); struct hfi1_devdata *dd = container_of(verbs_dev, struct hfi1_devdata, verbs_dev); unsigned int ctxt; if (qp->ibqp.qp_num == 0) ctxt = 0; else ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift); return dd->rcd[ctxt]; } int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp, struct ib_qp_init_attr *init_attr) { struct hfi1_qp_priv *qpriv = qp->priv; int i, ret; qpriv->rcd = qp_to_rcd(rdi, qp); spin_lock_init(&qpriv->opfn.lock); INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request); INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume); qpriv->flow_state.psn = 0; qpriv->flow_state.index = RXE_NUM_TID_FLOWS; qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS; qpriv->flow_state.generation = KERN_GENERATION_RESERVED; qpriv->s_state = TID_OP(WRITE_RESP); qpriv->s_tid_cur = HFI1_QP_WQE_INVALID; qpriv->s_tid_head = HFI1_QP_WQE_INVALID; qpriv->s_tid_tail = HFI1_QP_WQE_INVALID; qpriv->rnr_nak_state = TID_RNR_NAK_INIT; qpriv->r_tid_head = HFI1_QP_WQE_INVALID; qpriv->r_tid_tail = HFI1_QP_WQE_INVALID; qpriv->r_tid_ack = HFI1_QP_WQE_INVALID; qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID; atomic_set(&qpriv->n_requests, 0); atomic_set(&qpriv->n_tid_requests, 0); timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0); timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0); INIT_LIST_HEAD(&qpriv->tid_wait); if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) { struct hfi1_devdata *dd = qpriv->rcd->dd; qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES * sizeof(*qpriv->pages), GFP_KERNEL, dd->node); if (!qpriv->pages) return -ENOMEM; for (i = 0; i < qp->s_size; i++) { struct hfi1_swqe_priv *priv; struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i); priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, dd->node); if (!priv) return -ENOMEM; hfi1_init_trdma_req(qp, &priv->tid_req); priv->tid_req.e.swqe = wqe; wqe->priv = priv; } for (i = 0; i < rvt_max_atomic(rdi); i++) { struct hfi1_ack_priv *priv; priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, dd->node); if (!priv) return -ENOMEM; hfi1_init_trdma_req(qp, &priv->tid_req); priv->tid_req.e.ack = &qp->s_ack_queue[i]; ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_KERNEL); if (ret) { kfree(priv); return ret; } qp->s_ack_queue[i].priv = priv; } } return 0; } void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) { struct hfi1_qp_priv *qpriv = qp->priv; struct rvt_swqe *wqe; u32 i; if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) { for 
(i = 0; i < qp->s_size; i++) { wqe = rvt_get_swqe_ptr(qp, i); kfree(wqe->priv); wqe->priv = NULL; } for (i = 0; i < rvt_max_atomic(rdi); i++) { struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv; if (priv) hfi1_kern_exp_rcv_free_flows(&priv->tid_req); kfree(priv); qp->s_ack_queue[i].priv = NULL; } cancel_work_sync(&qpriv->opfn.opfn_work); kfree(qpriv->pages); qpriv->pages = NULL; } } /* Flow and tid waiter functions */ /** * DOC: lock ordering * * There are two locks involved with the queuing * routines: the qp s_lock and the exp_lock. * * Since the tid space allocation is called from * the send engine, the qp s_lock is already held. * * The allocation routines will get the exp_lock. * * The first_qp() call is provided to allow the head of * the rcd wait queue to be fetched under the exp_lock and * followed by a drop of the exp_lock. * * Any qp in the wait list will have the qp reference count held * to hold the qp in memory. */ /* * return head of rcd wait list * * Must hold the exp_lock. * * Get a reference to the QP to hold the QP in memory. * * The caller must release the reference when the local * is no longer being used. */ static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd, struct tid_queue *queue) __must_hold(&rcd->exp_lock) { struct hfi1_qp_priv *priv; lockdep_assert_held(&rcd->exp_lock); priv = list_first_entry_or_null(&queue->queue_head, struct hfi1_qp_priv, tid_wait); if (!priv) return NULL; rvt_get_qp(priv->owner); return priv->owner; } /** * kernel_tid_waiters - determine rcd wait * @rcd: the receive context * @queue: the queue to operate on * @qp: the head of the qp being processed * * This routine will return false IFF * the list is NULL or the head of the * list is the indicated qp. * * Must hold the qp s_lock and the exp_lock. * * Return: * false if either of the conditions below are satisfied: * 1. The list is empty or * 2. The indicated qp is at the head of the list and the * HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags. * true is returned otherwise. */ static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd, struct tid_queue *queue, struct rvt_qp *qp) __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) { struct rvt_qp *fqp; bool ret = true; lockdep_assert_held(&qp->s_lock); lockdep_assert_held(&rcd->exp_lock); fqp = first_qp(rcd, queue); if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE))) ret = false; rvt_put_qp(fqp); return ret; } /** * dequeue_tid_waiter - dequeue the qp from the list * @rcd: the receive context * @queue: the queue to operate on * @qp: the qp to remove the wait list * * This routine removes the indicated qp from the * wait list if it is there. * * This should be done after the hardware flow and * tid array resources have been allocated. * * Must hold the qp s_lock and the rcd exp_lock. * * It assumes the s_lock to protect the s_flags * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag. 
*/ static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd, struct tid_queue *queue, struct rvt_qp *qp) __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) { struct hfi1_qp_priv *priv = qp->priv; lockdep_assert_held(&qp->s_lock); lockdep_assert_held(&rcd->exp_lock); if (list_empty(&priv->tid_wait)) return; list_del_init(&priv->tid_wait); qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; queue->dequeue++; rvt_put_qp(qp); } /** * queue_qp_for_tid_wait - suspend QP on tid space * @rcd: the receive context * @queue: the queue to operate on * @qp: the qp * * The qp is inserted at the tail of the rcd * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set. * * Must hold the qp s_lock and the exp_lock. */ static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd, struct tid_queue *queue, struct rvt_qp *qp) __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) { struct hfi1_qp_priv *priv = qp->priv; lockdep_assert_held(&qp->s_lock); lockdep_assert_held(&rcd->exp_lock); if (list_empty(&priv->tid_wait)) { qp->s_flags |= HFI1_S_WAIT_TID_SPACE; list_add_tail(&priv->tid_wait, &queue->queue_head); priv->tid_enqueue = ++queue->enqueue; rcd->dd->verbs_dev.n_tidwait++; trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE); rvt_get_qp(qp); } } /** * __trigger_tid_waiter - trigger tid waiter * @qp: the qp * * This is a private entrance to schedule the qp * assuming the caller is holding the qp->s_lock. */ static void __trigger_tid_waiter(struct rvt_qp *qp) __must_hold(&qp->s_lock) { lockdep_assert_held(&qp->s_lock); if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE)) return; trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE); hfi1_schedule_send(qp); } /** * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp * @qp: the qp * * trigger a schedule or a waiting qp in a deadlock * safe manner. The qp reference is held prior * to this call via first_qp(). * * If the qp trigger was already scheduled (!rval) * the reference is dropped, otherwise the resume * or the destroy cancel will dispatch the reference. */ static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp) { struct hfi1_qp_priv *priv; struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; struct hfi1_devdata *dd; bool rval; if (!qp) return; priv = qp->priv; ibp = to_iport(qp->ibqp.device, qp->port_num); ppd = ppd_from_ibp(ibp); dd = dd_from_ibdev(qp->ibqp.device); rval = queue_work_on(priv->s_sde ? priv->s_sde->cpu : cpumask_first(cpumask_of_node(dd->node)), ppd->hfi1_wq, &priv->tid_rdma.trigger_work); if (!rval) rvt_put_qp(qp); } /** * tid_rdma_trigger_resume - field a trigger work request * @work: the work item * * Complete the off qp trigger processing by directly * calling the progress routine. */ static void tid_rdma_trigger_resume(struct work_struct *work) { struct tid_rdma_qp_params *tr; struct hfi1_qp_priv *priv; struct rvt_qp *qp; tr = container_of(work, struct tid_rdma_qp_params, trigger_work); priv = container_of(tr, struct hfi1_qp_priv, tid_rdma); qp = priv->owner; spin_lock_irq(&qp->s_lock); if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) { spin_unlock_irq(&qp->s_lock); hfi1_do_send(priv->owner, true); } else { spin_unlock_irq(&qp->s_lock); } rvt_put_qp(qp); } /* * tid_rdma_flush_wait - unwind any tid space wait * * This is called when resetting a qp to * allow a destroy or reset to get rid * of any tid space linkage and reference counts. 
*/ static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue) __must_hold(&qp->s_lock) { struct hfi1_qp_priv *priv; if (!qp) return; lockdep_assert_held(&qp->s_lock); priv = qp->priv; qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; spin_lock(&priv->rcd->exp_lock); if (!list_empty(&priv->tid_wait)) { list_del_init(&priv->tid_wait); qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; queue->dequeue++; rvt_put_qp(qp); } spin_unlock(&priv->rcd->exp_lock); } void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp) __must_hold(&qp->s_lock) { struct hfi1_qp_priv *priv = qp->priv; _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue); _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue); } /* Flow functions */ /** * kern_reserve_flow - allocate a hardware flow * @rcd: the context to use for allocation * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to * signify "don't care". * * Use a bit mask based allocation to reserve a hardware * flow for use in receiving KDETH data packets. If a preferred flow is * specified the function will attempt to reserve that flow again, if * available. * * The exp_lock must be held. * * Return: * On success: a value postive value between 0 and RXE_NUM_TID_FLOWS - 1 * On failure: -EAGAIN */ static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last) __must_hold(&rcd->exp_lock) { int nr; /* Attempt to reserve the preferred flow index */ if (last >= 0 && last < RXE_NUM_TID_FLOWS && !test_and_set_bit(last, &rcd->flow_mask)) return last; nr = ffz(rcd->flow_mask); BUILD_BUG_ON(RXE_NUM_TID_FLOWS >= (sizeof(rcd->flow_mask) * BITS_PER_BYTE)); if (nr > (RXE_NUM_TID_FLOWS - 1)) return -EAGAIN; set_bit(nr, &rcd->flow_mask); return nr; } static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation, u32 flow_idx) { u64 reg; reg = ((u64)generation << HFI1_KDETH_BTH_SEQ_SHIFT) | RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK | RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK | RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK | RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK | RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK; if (generation != KERN_GENERATION_RESERVED) reg |= RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK; write_uctxt_csr(rcd->dd, rcd->ctxt, RCV_TID_FLOW_TABLE + 8 * flow_idx, reg); } static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx) __must_hold(&rcd->exp_lock) { u32 generation = rcd->flows[flow_idx].generation; kern_set_hw_flow(rcd, generation, flow_idx); return generation; } static u32 kern_flow_generation_next(u32 gen) { u32 generation = mask_generation(gen + 1); if (generation == KERN_GENERATION_RESERVED) generation = mask_generation(generation + 1); return generation; } static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx) __must_hold(&rcd->exp_lock) { rcd->flows[flow_idx].generation = kern_flow_generation_next(rcd->flows[flow_idx].generation); kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx); } int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) { struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; struct tid_flow_state *fs = &qpriv->flow_state; struct rvt_qp *fqp; unsigned long flags; int ret = 0; /* The QP already has an allocated flow */ if (fs->index != RXE_NUM_TID_FLOWS) return ret; spin_lock_irqsave(&rcd->exp_lock, flags); if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp)) goto queue; ret = kern_reserve_flow(rcd, fs->last_index); if (ret < 0) goto queue; fs->index = ret; fs->last_index = fs->index; /* Generation received in a RESYNC overrides default flow generation */ if (fs->generation 
!= KERN_GENERATION_RESERVED) rcd->flows[fs->index].generation = fs->generation; fs->generation = kern_setup_hw_flow(rcd, fs->index); fs->psn = 0; dequeue_tid_waiter(rcd, &rcd->flow_queue, qp); /* get head before dropping lock */ fqp = first_qp(rcd, &rcd->flow_queue); spin_unlock_irqrestore(&rcd->exp_lock, flags); tid_rdma_schedule_tid_wakeup(fqp); return 0; queue: queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp); spin_unlock_irqrestore(&rcd->exp_lock, flags); return -EAGAIN; } void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) { struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; struct tid_flow_state *fs = &qpriv->flow_state; struct rvt_qp *fqp; unsigned long flags; if (fs->index >= RXE_NUM_TID_FLOWS) return; spin_lock_irqsave(&rcd->exp_lock, flags); kern_clear_hw_flow(rcd, fs->index); clear_bit(fs->index, &rcd->flow_mask); fs->index = RXE_NUM_TID_FLOWS; fs->psn = 0; fs->generation = KERN_GENERATION_RESERVED; /* get head before dropping lock */ fqp = first_qp(rcd, &rcd->flow_queue); spin_unlock_irqrestore(&rcd->exp_lock, flags); if (fqp == qp) { __trigger_tid_waiter(fqp); rvt_put_qp(fqp); } else { tid_rdma_schedule_tid_wakeup(fqp); } } void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd) { int i; for (i = 0; i < RXE_NUM_TID_FLOWS; i++) { rcd->flows[i].generation = mask_generation(get_random_u32()); kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i); } } /* TID allocation functions */ static u8 trdma_pset_order(struct tid_rdma_pageset *s) { u8 count = s->count; return ilog2(count) + 1; } /** * tid_rdma_find_phys_blocks_4k - get groups base on mr info * @flow: overall info for a TID RDMA segment * @pages: pointer to an array of page structs * @npages: number of pages * @list: page set array to return * * This routine returns the number of groups associated with * the current sge information. This implementation is based * on the expected receive find_phys_blocks() adjusted to * use the MR information vs. the pfn. * * Return: * the number of RcvArray entries */ static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow, struct page **pages, u32 npages, struct tid_rdma_pageset *list) { u32 pagecount, pageidx, setcount = 0, i; void *vaddr, *this_vaddr; if (!npages) return 0; /* * Look for sets of physically contiguous pages in the user buffer. * This will allow us to optimize Expected RcvArray entry usage by * using the bigger supported sizes. */ vaddr = page_address(pages[0]); trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr); for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) { this_vaddr = i < npages ? page_address(pages[i]) : NULL; trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0, this_vaddr); /* * If the vaddr's are not sequential, pages are not physically * contiguous. */ if (this_vaddr != (vaddr + PAGE_SIZE)) { /* * At this point we have to loop over the set of * physically contiguous pages and break them down it * sizes supported by the HW. * There are two main constraints: * 1. The max buffer size is MAX_EXPECTED_BUFFER. * If the total set size is bigger than that * program only a MAX_EXPECTED_BUFFER chunk. * 2. The buffer size has to be a power of two. If * it is not, round down to the closes power of * 2 and program that size. 
*/ while (pagecount) { int maxpages = pagecount; u32 bufsize = pagecount * PAGE_SIZE; if (bufsize > MAX_EXPECTED_BUFFER) maxpages = MAX_EXPECTED_BUFFER >> PAGE_SHIFT; else if (!is_power_of_2(bufsize)) maxpages = rounddown_pow_of_two(bufsize) >> PAGE_SHIFT; list[setcount].idx = pageidx; list[setcount].count = maxpages; trace_hfi1_tid_pageset(flow->req->qp, setcount, list[setcount].idx, list[setcount].count); pagecount -= maxpages; pageidx += maxpages; setcount++; } pageidx = i; pagecount = 1; vaddr = this_vaddr; } else { vaddr += PAGE_SIZE; pagecount++; } } /* insure we always return an even number of sets */ if (setcount & 1) list[setcount++].count = 0; return setcount; } /** * tid_flush_pages - dump out pages into pagesets * @list: list of pagesets * @idx: pointer to current page index * @pages: number of pages to dump * @sets: current number of pagesset * * This routine flushes out accumuated pages. * * To insure an even number of sets the * code may add a filler. * * This can happen with when pages is not * a power of 2 or pages is a power of 2 * less than the maximum pages. * * Return: * The new number of sets */ static u32 tid_flush_pages(struct tid_rdma_pageset *list, u32 *idx, u32 pages, u32 sets) { while (pages) { u32 maxpages = pages; if (maxpages > MAX_EXPECTED_PAGES) maxpages = MAX_EXPECTED_PAGES; else if (!is_power_of_2(maxpages)) maxpages = rounddown_pow_of_two(maxpages); list[sets].idx = *idx; list[sets++].count = maxpages; *idx += maxpages; pages -= maxpages; } /* might need a filler */ if (sets & 1) list[sets++].count = 0; return sets; } /** * tid_rdma_find_phys_blocks_8k - get groups base on mr info * @flow: overall info for a TID RDMA segment * @pages: pointer to an array of page structs * @npages: number of pages * @list: page set array to return * * This routine parses an array of pages to compute pagesets * in an 8k compatible way. * * pages are tested two at a time, i, i + 1 for contiguous * pages and i - 1 and i contiguous pages. * * If any condition is false, any accumlated pages are flushed and * v0,v1 are emitted as separate PAGE_SIZE pagesets * * Otherwise, the current 8k is totaled for a future flush. * * Return: * The number of pagesets * list set with the returned number of pagesets * */ static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow, struct page **pages, u32 npages, struct tid_rdma_pageset *list) { u32 idx, sets = 0, i; u32 pagecnt = 0; void *v0, *v1, *vm1; if (!npages) return 0; for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) { /* get a new v0 */ v0 = page_address(pages[i]); trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0); v1 = i + 1 < npages ? 
page_address(pages[i + 1]) : NULL; trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1); /* compare i, i + 1 vaddr */ if (v1 != (v0 + PAGE_SIZE)) { /* flush out pages */ sets = tid_flush_pages(list, &idx, pagecnt, sets); /* output v0,v1 as two pagesets */ list[sets].idx = idx++; list[sets++].count = 1; if (v1) { list[sets].count = 1; list[sets++].idx = idx++; } else { list[sets++].count = 0; } vm1 = NULL; pagecnt = 0; continue; } /* i,i+1 consecutive, look at i-1,i */ if (vm1 && v0 != (vm1 + PAGE_SIZE)) { /* flush out pages */ sets = tid_flush_pages(list, &idx, pagecnt, sets); pagecnt = 0; } /* pages will always be a multiple of 8k */ pagecnt += 2; /* save i-1 */ vm1 = v1; /* move to next pair */ } /* dump residual pages at end */ sets = tid_flush_pages(list, &idx, npages - idx, sets); /* by design cannot be odd sets */ WARN_ON(sets & 1); return sets; } /* * Find pages for one segment of a sge array represented by @ss. The function * does not check the sge, the sge must have been checked for alignment with a * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge * copy maintained in @ss->sge, the original sge is not modified. * * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not * releasing the MR reference count at the same time. Otherwise, we'll "leak" * references to the MR. This difference requires that we keep track of progress * into the sg_list. This is done by the cur_seg cursor in the tid_rdma_request * structure. */ static u32 kern_find_pages(struct tid_rdma_flow *flow, struct page **pages, struct rvt_sge_state *ss, bool *last) { struct tid_rdma_request *req = flow->req; struct rvt_sge *sge = &ss->sge; u32 length = flow->req->seg_len; u32 len = PAGE_SIZE; u32 i = 0; while (length && req->isge < ss->num_sge) { pages[i++] = virt_to_page(sge->vaddr); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (!sge->sge_length) { if (++req->isge < ss->num_sge) *sge = ss->sg_list[req->isge - 1]; } else if (sge->length == 0 && sge->mr->lkey) { if (++sge->n >= RVT_SEGSZ) { ++sge->m; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } length -= len; } flow->length = flow->req->seg_len - length; *last = req->isge != ss->num_sge; return i; } static void dma_unmap_flow(struct tid_rdma_flow *flow) { struct hfi1_devdata *dd; int i; struct tid_rdma_pageset *pset; dd = flow->req->rcd->dd; for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; i++, pset++) { if (pset->count && pset->addr) { dma_unmap_page(&dd->pcidev->dev, pset->addr, PAGE_SIZE * pset->count, DMA_FROM_DEVICE); pset->mapped = 0; } } } static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages) { int i; struct hfi1_devdata *dd = flow->req->rcd->dd; struct tid_rdma_pageset *pset; for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; i++, pset++) { if (pset->count) { pset->addr = dma_map_page(&dd->pcidev->dev, pages[pset->idx], 0, PAGE_SIZE * pset->count, DMA_FROM_DEVICE); if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) { dma_unmap_flow(flow); return -ENOMEM; } pset->mapped = 1; } } return 0; } static inline bool dma_mapped(struct tid_rdma_flow *flow) { return !!flow->pagesets[0].mapped; } /* * Get pages pointers and identify contiguous physical memory chunks for a * segment. All segments are of length flow->req->seg_len. 
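 *
 * Pagesets are computed with tid_rdma_find_phys_blocks_4k() when
 * the QP pMTU is 4096 and with the 8k variant otherwise, and are
 * then DMA-mapped. If the flow already has pagesets (a retried
 * segment), they are reused and only re-mapped when needed.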
*/ static int kern_get_phys_blocks(struct tid_rdma_flow *flow, struct page **pages, struct rvt_sge_state *ss, bool *last) { u8 npages; /* Reuse previously computed pagesets, if any */ if (flow->npagesets) { trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow); if (!dma_mapped(flow)) return dma_map_flow(flow, pages); return 0; } npages = kern_find_pages(flow, pages, ss, last); if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096)) flow->npagesets = tid_rdma_find_phys_blocks_4k(flow, pages, npages, flow->pagesets); else flow->npagesets = tid_rdma_find_phys_blocks_8k(flow, pages, npages, flow->pagesets); return dma_map_flow(flow, pages); } static inline void kern_add_tid_node(struct tid_rdma_flow *flow, struct hfi1_ctxtdata *rcd, char *s, struct tid_group *grp, u8 cnt) { struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++]; WARN_ON_ONCE(flow->tnode_cnt >= (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT)); if (WARN_ON_ONCE(cnt & 1)) dd_dev_err(rcd->dd, "unexpected odd allocation cnt %u map 0x%x used %u", cnt, grp->map, grp->used); node->grp = grp; node->map = grp->map; node->cnt = cnt; trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1, grp->base, grp->map, grp->used, cnt); } /* * Try to allocate pageset_count TID's from TID groups for a context * * This function allocates TID's without moving groups between lists or * modifying grp->map. This is done as follows, being cogizant of the lists * between which the TID groups will move: * 1. First allocate complete groups of 8 TID's since this is more efficient, * these groups will move from group->full without affecting used * 2. If more TID's are needed allocate from used (will move from used->full or * stay in used) * 3. If we still don't have the required number of TID's go back and look again * at a complete group (will move from group->used) */ static int kern_alloc_tids(struct tid_rdma_flow *flow) { struct hfi1_ctxtdata *rcd = flow->req->rcd; struct hfi1_devdata *dd = rcd->dd; u32 ngroups, pageidx = 0; struct tid_group *group = NULL, *used; u8 use; flow->tnode_cnt = 0; ngroups = flow->npagesets / dd->rcv_entries.group_size; if (!ngroups) goto used_list; /* First look at complete groups */ list_for_each_entry(group, &rcd->tid_group_list.list, list) { kern_add_tid_node(flow, rcd, "complete groups", group, group->size); pageidx += group->size; if (!--ngroups) break; } if (pageidx >= flow->npagesets) goto ok; used_list: /* Now look at partially used groups */ list_for_each_entry(used, &rcd->tid_used_list.list, list) { use = min_t(u32, flow->npagesets - pageidx, used->size - used->used); kern_add_tid_node(flow, rcd, "used groups", used, use); pageidx += use; if (pageidx >= flow->npagesets) goto ok; } /* * Look again at a complete group, continuing from where we left. 
* However, if we are at the head, we have reached the end of the * complete groups list from the first loop above */ if (group && &group->list == &rcd->tid_group_list.list) goto bail_eagain; group = list_prepare_entry(group, &rcd->tid_group_list.list, list); if (list_is_last(&group->list, &rcd->tid_group_list.list)) goto bail_eagain; group = list_next_entry(group, list); use = min_t(u32, flow->npagesets - pageidx, group->size); kern_add_tid_node(flow, rcd, "complete continue", group, use); pageidx += use; if (pageidx >= flow->npagesets) goto ok; bail_eagain: trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ", (u64)flow->npagesets); return -EAGAIN; ok: return 0; } static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num, u32 *pset_idx) { struct hfi1_ctxtdata *rcd = flow->req->rcd; struct hfi1_devdata *dd = rcd->dd; struct kern_tid_node *node = &flow->tnode[grp_num]; struct tid_group *grp = node->grp; struct tid_rdma_pageset *pset; u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT; u32 rcventry, npages = 0, pair = 0, tidctrl; u8 i, cnt = 0; for (i = 0; i < grp->size; i++) { rcventry = grp->base + i; if (node->map & BIT(i) || cnt >= node->cnt) { rcv_array_wc_fill(dd, rcventry); continue; } pset = &flow->pagesets[(*pset_idx)++]; if (pset->count) { hfi1_put_tid(dd, rcventry, PT_EXPECTED, pset->addr, trdma_pset_order(pset)); } else { hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0); } npages += pset->count; rcventry -= rcd->expected_base; tidctrl = pair ? 0x3 : rcventry & 0x1 ? 0x2 : 0x1; /* * A single TID entry will be used to use a rcvarr pair (with * tidctrl 0x3), if ALL these are true (a) the bit pos is even * (b) the group map shows current and the next bits as free * indicating two consecutive rcvarry entries are available (c) * we actually need 2 more entries */ pair = !(i & 0x1) && !((node->map >> i) & 0x3) && node->cnt >= cnt + 2; if (!pair) { if (!pset->count) tidctrl = 0x1; flow->tid_entry[flow->tidcnt++] = EXP_TID_SET(IDX, rcventry >> 1) | EXP_TID_SET(CTRL, tidctrl) | EXP_TID_SET(LEN, npages); trace_hfi1_tid_entry_alloc(/* entry */ flow->req->qp, flow->tidcnt - 1, flow->tid_entry[flow->tidcnt - 1]); /* Efficient DIV_ROUND_UP(npages, pmtu_pg) */ flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg); npages = 0; } if (grp->used == grp->size - 1) tid_group_move(grp, &rcd->tid_used_list, &rcd->tid_full_list); else if (!grp->used) tid_group_move(grp, &rcd->tid_group_list, &rcd->tid_used_list); grp->used++; grp->map |= BIT(i); cnt++; } } static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num) { struct hfi1_ctxtdata *rcd = flow->req->rcd; struct hfi1_devdata *dd = rcd->dd; struct kern_tid_node *node = &flow->tnode[grp_num]; struct tid_group *grp = node->grp; u32 rcventry; u8 i, cnt = 0; for (i = 0; i < grp->size; i++) { rcventry = grp->base + i; if (node->map & BIT(i) || cnt >= node->cnt) { rcv_array_wc_fill(dd, rcventry); continue; } hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0); grp->used--; grp->map &= ~BIT(i); cnt++; if (grp->used == grp->size - 1) tid_group_move(grp, &rcd->tid_full_list, &rcd->tid_used_list); else if (!grp->used) tid_group_move(grp, &rcd->tid_used_list, &rcd->tid_group_list); } if (WARN_ON_ONCE(cnt & 1)) { struct hfi1_ctxtdata *rcd = flow->req->rcd; struct hfi1_devdata *dd = rcd->dd; dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u", cnt, grp->map, grp->used); } } static void kern_program_rcvarray(struct tid_rdma_flow *flow) { u32 pset_idx = 0; int i; flow->npkts = 0; flow->tidcnt = 0; for (i = 0; i < 
flow->tnode_cnt; i++) kern_program_rcv_group(flow, i, &pset_idx); trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow); } /** * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a * TID RDMA request * * @req: TID RDMA request for which the segment/flow is being set up * @ss: sge state, maintains state across successive segments of a sge * @last: set to true after the last sge segment has been processed * * This function * (1) finds a free flow entry in the flow circular buffer * (2) finds pages and continuous physical chunks constituing one segment * of an sge * (3) allocates TID group entries for those chunks * (4) programs rcvarray entries in the hardware corresponding to those * TID's * (5) computes a tidarray with formatted TID entries which can be sent * to the sender * (6) Reserves and programs HW flows. * (7) It also manages queing the QP when TID/flow resources are not * available. * * @req points to struct tid_rdma_request of which the segments are a part. The * function uses qp, rcd and seg_len members of @req. In the absence of errors, * req->flow_idx is the index of the flow which has been prepared in this * invocation of function call. With flow = &req->flows[req->flow_idx], * flow->tid_entry contains the TID array which the sender can use for TID RDMA * sends and flow->npkts contains number of packets required to send the * segment. * * hfi1_check_sge_align should be called prior to calling this function and if * it signals error TID RDMA cannot be used for this sge and this function * should not be called. * * For the queuing, caller must hold the flow->req->qp s_lock from the send * engine and the function will procure the exp_lock. * * Return: * The function returns -EAGAIN if sufficient number of TID/flow resources to * map the segment could not be allocated. In this case the function should be * called again with previous arguments to retry the TID allocation. There are * no other error returns. The function returns 0 on success. */ int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req, struct rvt_sge_state *ss, bool *last) __must_hold(&req->qp->s_lock) { struct tid_rdma_flow *flow = &req->flows[req->setup_head]; struct hfi1_ctxtdata *rcd = req->rcd; struct hfi1_qp_priv *qpriv = req->qp->priv; unsigned long flags; struct rvt_qp *fqp; u16 clear_tail = req->clear_tail; lockdep_assert_held(&req->qp->s_lock); /* * We return error if either (a) we don't have space in the flow * circular buffer, or (b) we already have max entries in the buffer. * Max entries depend on the type of request we are processing and the * negotiated TID RDMA parameters. */ if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) || CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >= req->n_flows) return -EINVAL; /* * Get pages, identify contiguous physical memory chunks for the segment * If we can not determine a DMA address mapping we will treat it just * like if we ran out of space above. */ if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) { hfi1_wait_kmem(flow->req->qp); return -ENOMEM; } spin_lock_irqsave(&rcd->exp_lock, flags); if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp)) goto queue; /* * At this point we know the number of pagesets and hence the number of * TID's to map the segment. Allocate the TID's from the TID groups. 
If * we cannot allocate the required number we exit and try again later */ if (kern_alloc_tids(flow)) goto queue; /* * Finally program the TID entries with the pagesets, compute the * tidarray and enable the HW flow */ kern_program_rcvarray(flow); /* * Setup the flow state with relevant information. * This information is used for tracking the sequence of data packets * for the segment. * The flow is setup here as this is the most accurate time and place * to do so. Doing at a later time runs the risk of the flow data in * qpriv getting out of sync. */ memset(&flow->flow_state, 0x0, sizeof(flow->flow_state)); flow->idx = qpriv->flow_state.index; flow->flow_state.generation = qpriv->flow_state.generation; flow->flow_state.spsn = qpriv->flow_state.psn; flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; flow->flow_state.r_next_psn = full_flow_psn(flow, flow->flow_state.spsn); qpriv->flow_state.psn += flow->npkts; dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp); /* get head before dropping lock */ fqp = first_qp(rcd, &rcd->rarr_queue); spin_unlock_irqrestore(&rcd->exp_lock, flags); tid_rdma_schedule_tid_wakeup(fqp); req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1); return 0; queue: queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp); spin_unlock_irqrestore(&rcd->exp_lock, flags); return -EAGAIN; } static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow) { flow->npagesets = 0; } /* * This function is called after one segment has been successfully sent to * release the flow and TID HW/SW resources for that segment. The segments for a * TID RDMA request are setup and cleared in FIFO order which is managed using a * circular buffer. */ int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req) __must_hold(&req->qp->s_lock) { struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; struct hfi1_ctxtdata *rcd = req->rcd; unsigned long flags; int i; struct rvt_qp *fqp; lockdep_assert_held(&req->qp->s_lock); /* Exit if we have nothing in the flow circular buffer */ if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) return -EINVAL; spin_lock_irqsave(&rcd->exp_lock, flags); for (i = 0; i < flow->tnode_cnt; i++) kern_unprogram_rcv_group(flow, i); /* To prevent double unprogramming */ flow->tnode_cnt = 0; /* get head before dropping lock */ fqp = first_qp(rcd, &rcd->rarr_queue); spin_unlock_irqrestore(&rcd->exp_lock, flags); dma_unmap_flow(flow); hfi1_tid_rdma_reset_flow(flow); req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1); if (fqp == req->qp) { __trigger_tid_waiter(fqp); rvt_put_qp(fqp); } else { tid_rdma_schedule_tid_wakeup(fqp); } return 0; } /* * This function is called to release all the tid entries for * a request. */ void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req) __must_hold(&req->qp->s_lock) { /* Use memory barrier for proper ordering */ while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) { if (hfi1_kern_exp_rcv_clear(req)) break; } } /** * hfi1_kern_exp_rcv_free_flows - free priviously allocated flow information * @req: the tid rdma request to be cleaned */ static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req) { kfree(req->flows); req->flows = NULL; } /** * __trdma_clean_swqe - clean up for large sized QPs * @qp: the queue patch * @wqe: the send wqe */ void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) { struct hfi1_swqe_priv *p = wqe->priv; hfi1_kern_exp_rcv_free_flows(&p->tid_req); } /* * This can be called at QP create time or in the data path. 
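 *
 * The caller supplies the gfp flags; allocation from the data path
 * (e.g. setup_tid_rdma_wqe()) uses GFP_ATOMIC. The MAX_FLOWS array
 * is allocated once, on the receive context's NUMA node, and kept
 * until hfi1_kern_exp_rcv_free_flows() releases it.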
*/ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req, gfp_t gfp) { struct tid_rdma_flow *flows; int i; if (likely(req->flows)) return 0; flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp, req->rcd->numa_id); if (!flows) return -ENOMEM; /* mini init */ for (i = 0; i < MAX_FLOWS; i++) { flows[i].req = req; flows[i].npagesets = 0; flows[i].pagesets[0].mapped = 0; flows[i].resync_npkts = 0; } req->flows = flows; return 0; } static void hfi1_init_trdma_req(struct rvt_qp *qp, struct tid_rdma_request *req) { struct hfi1_qp_priv *qpriv = qp->priv; /* * Initialize various TID RDMA request variables. * These variables are "static", which is why they * can be pre-initialized here before the WRs has * even been submitted. * However, non-NULL values for these variables do not * imply that this WQE has been enabled for TID RDMA. * Drivers should check the WQE's opcode to determine * if a request is a TID RDMA one or not. */ req->qp = qp; req->rcd = qpriv->rcd; } u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; return dd->verbs_dev.n_tidwait; } static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req, u32 psn, u16 *fidx) { u16 head, tail; struct tid_rdma_flow *flow; head = req->setup_head; tail = req->clear_tail; for ( ; CIRC_CNT(head, tail, MAX_FLOWS); tail = CIRC_NEXT(tail, MAX_FLOWS)) { flow = &req->flows[tail]; if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 && cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) { if (fidx) *fidx = tail; return flow; } } return NULL; } /* TID RDMA READ functions */ u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u32 *len) { struct tid_rdma_request *req = wqe_to_tid_req(wqe); struct tid_rdma_flow *flow = &req->flows[req->flow_idx]; struct rvt_qp *qp = req->qp; struct hfi1_qp_priv *qpriv = qp->priv; struct hfi1_swqe_priv *wpriv = wqe->priv; struct tid_rdma_read_req *rreq = &ohdr->u.tid_rdma.r_req; struct tid_rdma_params *remote; u32 req_len = 0; void *req_addr = NULL; /* This is the IB psn used to send the request */ *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt); trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow); /* TID Entries for TID RDMA READ payload */ req_addr = &flow->tid_entry[flow->tid_idx]; req_len = sizeof(*flow->tid_entry) * (flow->tidcnt - flow->tid_idx); memset(&ohdr->u.tid_rdma.r_req, 0, sizeof(ohdr->u.tid_rdma.r_req)); wpriv->ss.sge.vaddr = req_addr; wpriv->ss.sge.sge_length = req_len; wpriv->ss.sge.length = wpriv->ss.sge.sge_length; /* * We can safely zero these out. Since the first SGE covers the * entire packet, nothing else should even look at the MR. 
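 *
 * The SGE set up here points at the flow's tid_entry[] array, so
 * the send engine transmits the TID list itself as the READ REQ
 * payload rather than user data.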
*/ wpriv->ss.sge.mr = NULL; wpriv->ss.sge.m = 0; wpriv->ss.sge.n = 0; wpriv->ss.sg_list = NULL; wpriv->ss.total_len = wpriv->ss.sge.sge_length; wpriv->ss.num_sge = 1; /* Construct the TID RDMA READ REQ packet header */ rcu_read_lock(); remote = rcu_dereference(qpriv->tid_rdma.remote); KDETH_RESET(rreq->kdeth0, KVER, 0x1); KDETH_RESET(rreq->kdeth1, JKEY, remote->jkey); rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr + req->cur_seg * req->seg_len + flow->sent); rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey); rreq->reth.length = cpu_to_be32(*len); rreq->tid_flow_psn = cpu_to_be32((flow->flow_state.generation << HFI1_KDETH_BTH_SEQ_SHIFT) | ((flow->flow_state.spsn + flow->pkt) & HFI1_KDETH_BTH_SEQ_MASK)); rreq->tid_flow_qp = cpu_to_be32(qpriv->tid_rdma.local.qp | ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << TID_RDMA_DESTQP_FLOW_SHIFT) | qpriv->rcd->ctxt); rreq->verbs_qp = cpu_to_be32(qp->remote_qpn); *bth1 &= ~RVT_QPN_MASK; *bth1 |= remote->qp; *bth2 |= IB_BTH_REQ_ACK; rcu_read_unlock(); /* We are done with this segment */ flow->sent += *len; req->cur_seg++; qp->s_state = TID_OP(READ_REQ); req->ack_pending++; req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1); qpriv->pending_tid_r_segs++; qp->s_num_rd_atomic++; /* Set the TID RDMA READ request payload size */ *len = req_len; return sizeof(ohdr->u.tid_rdma.r_req) / sizeof(u32); } /* * @len: contains the data length to read upon entry and the read request * payload length upon exit. */ u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u32 *len) __must_hold(&qp->s_lock) { struct hfi1_qp_priv *qpriv = qp->priv; struct tid_rdma_request *req = wqe_to_tid_req(wqe); struct tid_rdma_flow *flow = NULL; u32 hdwords = 0; bool last; bool retry = true; u32 npkts = rvt_div_round_up_mtu(qp, *len); trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); /* * Check sync conditions. Make sure that there are no pending * segments before freeing the flow. */ sync_check: if (req->state == TID_REQUEST_SYNC) { if (qpriv->pending_tid_r_segs) goto done; hfi1_kern_clear_hw_flow(req->rcd, qp); qpriv->s_flags &= ~HFI1_R_TID_SW_PSN; req->state = TID_REQUEST_ACTIVE; } /* * If the request for this segment is resent, the tid resources should * have been allocated before. In this case, req->flow_idx should * fall behind req->setup_head. */ if (req->flow_idx == req->setup_head) { retry = false; if (req->state == TID_REQUEST_RESEND) { /* * This is the first new segment for a request whose * earlier segments have been re-sent. We need to * set up the sge pointer correctly. */ restart_sge(&qp->s_sge, wqe, req->s_next_psn, qp->pmtu); req->isge = 0; req->state = TID_REQUEST_ACTIVE; } /* * Check sync. The last PSN of each generation is reserved for * RESYNC. */ if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) { req->state = TID_REQUEST_SYNC; goto sync_check; } /* Allocate the flow if not yet */ if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp)) goto done; /* * The following call will advance req->setup_head after * allocating the tid entries. */ if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) { req->state = TID_REQUEST_QUEUED; /* * We don't have resources for this segment. The QP has * already been queued. 
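 *
 * hfi1_kern_exp_rcv_setup() queues the QP on the rarr_queue when
 * TID/flow resources run out (or on the kmem wait list when the
 * DMA mapping fails); the segment is rebuilt once the QP is woken
 * up again.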
*/ goto done; } } /* req->flow_idx should only be one slot behind req->setup_head */ flow = &req->flows[req->flow_idx]; flow->pkt = 0; flow->tid_idx = 0; flow->sent = 0; if (!retry) { /* Set the first and last IB PSN for the flow in use.*/ flow->flow_state.ib_spsn = req->s_next_psn; flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1; } /* Calculate the next segment start psn.*/ req->s_next_psn += flow->npkts; /* Build the packet header */ hdwords = hfi1_build_tid_rdma_read_packet(wqe, ohdr, bth1, bth2, len); done: return hdwords; } /* * Validate and accept the TID RDMA READ request parameters. * Return 0 if the request is accepted successfully; * Return 1 otherwise. */ static int tid_rdma_rcv_read_request(struct rvt_qp *qp, struct rvt_ack_entry *e, struct hfi1_packet *packet, struct ib_other_headers *ohdr, u32 bth0, u32 psn, u64 vaddr, u32 len) { struct hfi1_qp_priv *qpriv = qp->priv; struct tid_rdma_request *req; struct tid_rdma_flow *flow; u32 flow_psn, i, tidlen = 0, pktlen, tlen; req = ack_to_tid_req(e); /* Validate the payload first */ flow = &req->flows[req->setup_head]; /* payload length = packet length - (header length + ICRC length) */ pktlen = packet->tlen - (packet->hlen + 4); if (pktlen > sizeof(flow->tid_entry)) return 1; memcpy(flow->tid_entry, packet->ebuf, pktlen); flow->tidcnt = pktlen / sizeof(*flow->tid_entry); /* * Walk the TID_ENTRY list to make sure we have enough space for a * complete segment. Also calculate the number of required packets. */ flow->npkts = rvt_div_round_up_mtu(qp, len); for (i = 0; i < flow->tidcnt; i++) { trace_hfi1_tid_entry_rcv_read_req(qp, i, flow->tid_entry[i]); tlen = EXP_TID_GET(flow->tid_entry[i], LEN); if (!tlen) return 1; /* * For tid pair (tidctr == 3), the buffer size of the pair * should be the sum of the buffer size described by each * tid entry. However, only the first entry needs to be * specified in the request (see WFR HAS Section 8.5.7.1). */ tidlen += tlen; } if (tidlen * PAGE_SIZE < len) return 1; /* Empty the flow array */ req->clear_tail = req->setup_head; flow->pkt = 0; flow->tid_idx = 0; flow->tid_offset = 0; flow->sent = 0; flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp); flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & TID_RDMA_DESTQP_FLOW_MASK; flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_psn)); flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; flow->length = len; flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; flow->flow_state.ib_spsn = psn; flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1; trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow); /* Set the initial flow index to the current flow. */ req->flow_idx = req->setup_head; /* advance circular buffer head */ req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1); /* * Compute last PSN for request. 
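 *
 * flow->npkts was derived from the request length above, so e.g. a
 * 256 KiB READ at a 4096-byte pMTU spans 64 packets and its last
 * PSN is psn + 63.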
*/ e->opcode = (bth0 >> 24) & 0xff; e->psn = psn; e->lpsn = psn + flow->npkts - 1; e->sent = 0; req->n_flows = qpriv->tid_rdma.local.max_read; req->state = TID_REQUEST_ACTIVE; req->cur_seg = 0; req->comp_seg = 0; req->ack_seg = 0; req->isge = 0; req->seg_len = qpriv->tid_rdma.local.max_len; req->total_len = len; req->total_segs = 1; req->r_flow_psn = e->psn; trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn, req); return 0; } static int tid_rdma_rcv_error(struct hfi1_packet *packet, struct ib_other_headers *ohdr, struct rvt_qp *qp, u32 psn, int diff) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); struct hfi1_qp_priv *qpriv = qp->priv; struct rvt_ack_entry *e; struct tid_rdma_request *req; unsigned long flags; u8 prev; bool old_req; trace_hfi1_rsp_tid_rcv_error(qp, psn); trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff); if (diff > 0) { /* sequence error */ if (!qp->r_nak_state) { ibp->rvp.n_rc_seqnak++; qp->r_nak_state = IB_NAK_PSN_ERROR; qp->r_ack_psn = qp->r_psn; rc_defered_ack(rcd, qp); } goto done; } ibp->rvp.n_rc_dupreq++; spin_lock_irqsave(&qp->s_lock, flags); e = find_prev_entry(qp, psn, &prev, NULL, &old_req); if (!e || (e->opcode != TID_OP(READ_REQ) && e->opcode != TID_OP(WRITE_REQ))) goto unlock; req = ack_to_tid_req(e); req->r_flow_psn = psn; trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req); if (e->opcode == TID_OP(READ_REQ)) { struct ib_reth *reth; u32 len; u32 rkey; u64 vaddr; int ok; u32 bth0; reth = &ohdr->u.tid_rdma.r_req.reth; /* * The requester always restarts from the start of the original * request. */ len = be32_to_cpu(reth->length); if (psn != e->psn || len != req->total_len) goto unlock; release_rdma_sge_mr(e); rkey = be32_to_cpu(reth->rkey); vaddr = get_ib_reth_vaddr(reth); qp->r_len = len; ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto unlock; /* * If all the response packets for the current request have * been sent out and this request is complete (old_request * == false) and the TID flow may be unusable (the * req->clear_tail is advanced). However, when an earlier * request is received, this request will not be complete any * more (qp->s_tail_ack_queue is moved back, see below). * Consequently, we need to update the TID flow info everytime * a duplicate request is received. */ bth0 = be32_to_cpu(ohdr->bth[0]); if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr, len)) goto unlock; /* * True if the request is already scheduled (between * qp->s_tail_ack_queue and qp->r_head_ack_queue); */ if (old_req) goto unlock; } else { struct flow_state *fstate; bool schedule = false; u8 i; if (req->state == TID_REQUEST_RESEND) { req->state = TID_REQUEST_RESEND_ACTIVE; } else if (req->state == TID_REQUEST_INIT_RESEND) { req->state = TID_REQUEST_INIT; schedule = true; } /* * True if the request is already scheduled (between * qp->s_tail_ack_queue and qp->r_head_ack_queue). * Also, don't change requests, which are at the SYNC * point and haven't generated any responses yet. * There is nothing to retransmit for them yet. 
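 *
 * The loop below walks the ack queue from this entry up to
 * r_head_ack_queue and moves any TID WRITE request still in the
 * INIT state to INIT_RESEND, so the first leg picks the change up
 * when it is scheduled.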
*/ if (old_req || req->state == TID_REQUEST_INIT || (req->state == TID_REQUEST_SYNC && !req->cur_seg)) { for (i = prev + 1; ; i++) { if (i > rvt_size_atomic(&dev->rdi)) i = 0; if (i == qp->r_head_ack_queue) break; e = &qp->s_ack_queue[i]; req = ack_to_tid_req(e); if (e->opcode == TID_OP(WRITE_REQ) && req->state == TID_REQUEST_INIT) req->state = TID_REQUEST_INIT_RESEND; } /* * If the state of the request has been changed, * the first leg needs to get scheduled in order to * pick up the change. Otherwise, normal response * processing should take care of it. */ if (!schedule) goto unlock; } /* * If there is no more allocated segment, just schedule the qp * without changing any state. */ if (req->clear_tail == req->setup_head) goto schedule; /* * If this request has sent responses for segments, which have * not received data yet (flow_idx != clear_tail), the flow_idx * pointer needs to be adjusted so the same responses can be * re-sent. */ if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) { fstate = &req->flows[req->clear_tail].flow_state; qpriv->pending_tid_w_segs -= CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS); req->flow_idx = CIRC_ADD(req->clear_tail, delta_psn(psn, fstate->resp_ib_psn), MAX_FLOWS); qpriv->pending_tid_w_segs += delta_psn(psn, fstate->resp_ib_psn); /* * When flow_idx == setup_head, we've gotten a duplicate * request for a segment, which has not been allocated * yet. In that case, don't adjust this request. * However, we still want to go through the loop below * to adjust all subsequent requests. */ if (CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS)) { req->cur_seg = delta_psn(psn, e->psn); req->state = TID_REQUEST_RESEND_ACTIVE; } } for (i = prev + 1; ; i++) { /* * Look at everything up to and including * s_tail_ack_queue */ if (i > rvt_size_atomic(&dev->rdi)) i = 0; if (i == qp->r_head_ack_queue) break; e = &qp->s_ack_queue[i]; req = ack_to_tid_req(e); trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req); if (e->opcode != TID_OP(WRITE_REQ) || req->cur_seg == req->comp_seg || req->state == TID_REQUEST_INIT || req->state == TID_REQUEST_INIT_RESEND) { if (req->state == TID_REQUEST_INIT) req->state = TID_REQUEST_INIT_RESEND; continue; } qpriv->pending_tid_w_segs -= CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS); req->flow_idx = req->clear_tail; req->state = TID_REQUEST_RESEND; req->cur_seg = req->comp_seg; } qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK; } /* Re-process old requests.*/ if (qp->s_acked_ack_queue == qp->s_tail_ack_queue) qp->s_acked_ack_queue = prev; qp->s_tail_ack_queue = prev; /* * Since the qp->s_tail_ack_queue is modified, the * qp->s_ack_state must be changed to re-initialize * qp->s_ack_rdma_sge; Otherwise, we will end up in * wrong memory region. */ qp->s_ack_state = OP(ACKNOWLEDGE); schedule: /* * It's possible to receive a retry psn that is earlier than an RNRNAK * psn. In this case, the rnrnak state should be cleared. */ if (qpriv->rnr_nak_state) { qp->s_nak_state = 0; qpriv->rnr_nak_state = TID_RNR_NAK_INIT; qp->r_psn = e->lpsn + 1; hfi1_tid_write_alloc_resources(qp, true); } qp->r_state = e->opcode; qp->r_nak_state = 0; qp->s_flags |= RVT_S_RESP_PENDING; hfi1_schedule_send(qp); unlock: spin_unlock_irqrestore(&qp->s_lock, flags); done: return 1; } void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet) { /* HANDLER FOR TID RDMA READ REQUEST packet (Responder side)*/ /* * 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ * (see hfi1_rc_rcv()) * 2. 
Put TID RDMA READ REQ into the response queueu (s_ack_queue) * - Setup struct tid_rdma_req with request info * - Initialize struct tid_rdma_flow info; * - Copy TID entries; * 3. Set the qp->s_ack_state. * 4. Set RVT_S_RESP_PENDING in s_flags. * 5. Kick the send engine (hfi1_schedule_send()) */ struct hfi1_ctxtdata *rcd = packet->rcd; struct rvt_qp *qp = packet->qp; struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct ib_other_headers *ohdr = packet->ohdr; struct rvt_ack_entry *e; unsigned long flags; struct ib_reth *reth; struct hfi1_qp_priv *qpriv = qp->priv; u32 bth0, psn, len, rkey; bool fecn; u8 next; u64 vaddr; int diff; u8 nack_state = IB_NAK_INVALID_REQUEST; bth0 = be32_to_cpu(ohdr->bth[0]); if (hfi1_ruc_check_hdr(ibp, packet)) return; fecn = process_ecn(qp, packet); psn = mask_psn(be32_to_cpu(ohdr->bth[2])); trace_hfi1_rsp_rcv_tid_read_req(qp, psn); if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) rvt_comm_est(qp); if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) goto nack_inv; reth = &ohdr->u.tid_rdma.r_req.reth; vaddr = be64_to_cpu(reth->vaddr); len = be32_to_cpu(reth->length); /* The length needs to be in multiples of PAGE_SIZE */ if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len) goto nack_inv; diff = delta_psn(psn, qp->r_psn); if (unlikely(diff)) { tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn); return; } /* We've verified the request, insert it into the ack queue. */ next = qp->r_head_ack_queue + 1; if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) next = 0; spin_lock_irqsave(&qp->s_lock, flags); if (unlikely(next == qp->s_tail_ack_queue)) { if (!qp->s_ack_queue[next].sent) { nack_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; goto nack_inv_unlock; } update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; release_rdma_sge_mr(e); rkey = be32_to_cpu(reth->rkey); qp->r_len = len; if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr, rkey, IB_ACCESS_REMOTE_READ))) goto nack_acc; /* Accept the request parameters */ if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr, len)) goto nack_inv_unlock; qp->r_state = e->opcode; qp->r_nak_state = 0; /* * We need to increment the MSN here instead of when we * finish sending the result since a duplicate request would * increment it more than once. */ qp->r_msn++; qp->r_psn += e->lpsn - e->psn + 1; qp->r_head_ack_queue = next; /* * For all requests other than TID WRITE which are added to the ack * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to * do this because of interlocks between these and TID WRITE * requests. The same change has also been made in hfi1_rc_rcv(). */ qpriv->r_tid_alloc = qp->r_head_ack_queue; /* Schedule the send tasklet. 
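 *
 * RVT_S_RESP_PENDING (plus RVT_S_ECN when a FECN was seen) tells
 * the send engine to generate the TID RDMA READ RESP packets from
 * the ack queue entry populated above.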
*/ qp->s_flags |= RVT_S_RESP_PENDING; if (fecn) qp->s_flags |= RVT_S_ECN; hfi1_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); return; nack_inv_unlock: spin_unlock_irqrestore(&qp->s_lock, flags); nack_inv: rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qp->r_nak_state = nack_state; qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ rc_defered_ack(rcd, qp); return; nack_acc: spin_unlock_irqrestore(&qp->s_lock, flags); rvt_rc_error(qp, IB_WC_LOC_PROT_ERR); qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; qp->r_ack_psn = qp->r_psn; } u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e, struct ib_other_headers *ohdr, u32 *bth0, u32 *bth1, u32 *bth2, u32 *len, bool *last) { struct hfi1_ack_priv *epriv = e->priv; struct tid_rdma_request *req = &epriv->tid_req; struct hfi1_qp_priv *qpriv = qp->priv; struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; u32 tidentry = flow->tid_entry[flow->tid_idx]; u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT; struct tid_rdma_read_resp *resp = &ohdr->u.tid_rdma.r_rsp; u32 next_offset, om = KDETH_OM_LARGE; bool last_pkt; u32 hdwords = 0; struct tid_rdma_params *remote; *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); flow->sent += *len; next_offset = flow->tid_offset + *len; last_pkt = (flow->sent >= flow->length); trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry); trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow); rcu_read_lock(); remote = rcu_dereference(qpriv->tid_rdma.remote); if (!remote) { rcu_read_unlock(); goto done; } KDETH_RESET(resp->kdeth0, KVER, 0x1); KDETH_SET(resp->kdeth0, SH, !last_pkt); KDETH_SET(resp->kdeth0, INTR, !!(!last_pkt && remote->urg)); KDETH_SET(resp->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL)); KDETH_SET(resp->kdeth0, TID, EXP_TID_GET(tidentry, IDX)); KDETH_SET(resp->kdeth0, OM, om == KDETH_OM_LARGE); KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om); KDETH_RESET(resp->kdeth1, JKEY, remote->jkey); resp->verbs_qp = cpu_to_be32(qp->remote_qpn); rcu_read_unlock(); resp->aeth = rvt_compute_aeth(qp); resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn + flow->pkt)); *bth0 = TID_OP(READ_RESP) << 24; *bth1 = flow->tid_qpn; *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & HFI1_KDETH_BTH_SEQ_MASK) | (flow->flow_state.generation << HFI1_KDETH_BTH_SEQ_SHIFT)); *last = last_pkt; if (last_pkt) /* Advance to next flow */ req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1); if (next_offset >= tidlen) { flow->tid_offset = 0; flow->tid_idx++; } else { flow->tid_offset = next_offset; } hdwords = sizeof(ohdr->u.tid_rdma.r_rsp) / sizeof(u32); done: return hdwords; } static inline struct tid_rdma_request * find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode) __must_hold(&qp->s_lock) { struct rvt_swqe *wqe; struct tid_rdma_request *req = NULL; u32 i, end; end = qp->s_cur + 1; if (end == qp->s_size) end = 0; for (i = qp->s_acked; i != end;) { wqe = rvt_get_swqe_ptr(qp, i); if (cmp_psn(psn, wqe->psn) >= 0 && cmp_psn(psn, wqe->lpsn) <= 0) { if (wqe->wr.opcode == opcode) req = wqe_to_tid_req(wqe); break; } if (++i == qp->s_size) i = 0; } return req; } void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet) { /* HANDLER FOR TID RDMA READ RESPONSE packet (Requestor side */ /* * 1. Find matching SWQE * 2. Check that the entire segment has been read. * 3. Remove HFI1_S_WAIT_TID_RESP from s_flags. * 4. Free the TID flow resources. * 5. 
Kick the send engine (hfi1_schedule_send()) */ struct ib_other_headers *ohdr = packet->ohdr; struct rvt_qp *qp = packet->qp; struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ctxtdata *rcd = packet->rcd; struct tid_rdma_request *req; struct tid_rdma_flow *flow; u32 opcode, aeth; bool fecn; unsigned long flags; u32 kpsn, ipsn; trace_hfi1_sender_rcv_tid_read_resp(qp); fecn = process_ecn(qp, packet); kpsn = mask_psn(be32_to_cpu(ohdr->bth[2])); aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth); opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; spin_lock_irqsave(&qp->s_lock, flags); ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn)); req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ); if (unlikely(!req)) goto ack_op_err; flow = &req->flows[req->clear_tail]; /* When header suppression is disabled */ if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) { update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); if (cmp_psn(kpsn, flow->flow_state.r_next_psn)) goto ack_done; flow->flow_state.r_next_psn = mask_psn(kpsn + 1); /* * Copy the payload to destination buffer if this packet is * delivered as an eager packet due to RSM rule and FECN. * The RSM rule selects FECN bit in BTH and SH bit in * KDETH header and therefore will not match the last * packet of each segment that has SH bit cleared. */ if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) { struct rvt_sge_state ss; u32 len; u32 tlen = packet->tlen; u16 hdrsize = packet->hlen; u8 pad = packet->pad; u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2); u32 pmtu = qp->pmtu; if (unlikely(tlen != (hdrsize + pmtu + extra_bytes))) goto ack_op_err; len = restart_sge(&ss, req->e.swqe, ipsn, pmtu); if (unlikely(len < pmtu)) goto ack_op_err; rvt_copy_sge(qp, &ss, packet->payload, pmtu, false, false); /* Raise the sw sequence check flag for next packet */ priv->s_flags |= HFI1_R_TID_SW_PSN; } goto ack_done; } flow->flow_state.r_next_psn = mask_psn(kpsn + 1); req->ack_pending--; priv->pending_tid_r_segs--; qp->s_num_rd_atomic--; if ((qp->s_flags & RVT_S_WAIT_FENCE) && !qp->s_num_rd_atomic) { qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_ACK); hfi1_schedule_send(qp); } if (qp->s_flags & RVT_S_WAIT_RDMAR) { qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK); hfi1_schedule_send(qp); } trace_hfi1_ack(qp, ipsn); trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode, req->e.swqe->psn, req->e.swqe->lpsn, req); trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow); /* Release the tid resources */ hfi1_kern_exp_rcv_clear(req); if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd)) goto ack_done; /* If not done yet, build next read request */ if (++req->comp_seg >= req->total_segs) { priv->tid_r_comp++; req->state = TID_REQUEST_COMPLETE; } /* * Clear the hw flow under two conditions: * 1. This request is a sync point and it is complete; * 2. Current request is completed and there are no more requests. */ if ((req->state == TID_REQUEST_SYNC && req->comp_seg == req->cur_seg) || priv->tid_r_comp == priv->tid_r_reqs) { hfi1_kern_clear_hw_flow(priv->rcd, qp); priv->s_flags &= ~HFI1_R_TID_SW_PSN; if (req->state == TID_REQUEST_SYNC) req->state = TID_REQUEST_ACTIVE; } hfi1_schedule_send(qp); goto ack_done; ack_op_err: /* * The test indicates that the send engine has finished its cleanup * after sending the request and it's now safe to put the QP into error * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail * == qp->s_head), it would be unsafe to complete the wqe pointed by * qp->s_acked here. 
Putting the qp into error state will safely flush * all remaining requests. */ if (qp->s_last == qp->s_acked) rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); ack_done: spin_unlock_irqrestore(&qp->s_lock, flags); } void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp) __must_hold(&qp->s_lock) { u32 n = qp->s_acked; struct rvt_swqe *wqe; struct tid_rdma_request *req; struct hfi1_qp_priv *priv = qp->priv; lockdep_assert_held(&qp->s_lock); /* Free any TID entries */ while (n != qp->s_tail) { wqe = rvt_get_swqe_ptr(qp, n); if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { req = wqe_to_tid_req(wqe); hfi1_kern_exp_rcv_clear_all(req); } if (++n == qp->s_size) n = 0; } /* Free flow */ hfi1_kern_clear_hw_flow(priv->rcd, qp); } static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type) { struct rvt_qp *qp = packet->qp; if (rcv_type >= RHF_RCV_TYPE_IB) goto done; spin_lock(&qp->s_lock); /* * We've ran out of space in the eager buffer. * Eagerly received KDETH packets which require space in the * Eager buffer (packet that have payload) are TID RDMA WRITE * response packets. In this case, we have to re-transmit the * TID RDMA WRITE request. */ if (rcv_type == RHF_RCV_TYPE_EAGER) { hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); hfi1_schedule_send(qp); } /* Since no payload is delivered, just drop the packet */ spin_unlock(&qp->s_lock); done: return true; } static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, struct rvt_swqe *wqe) { struct tid_rdma_request *req; struct tid_rdma_flow *flow; /* Start from the right segment */ qp->r_flags |= RVT_R_RDMAR_SEQ; req = wqe_to_tid_req(wqe); flow = &req->flows[req->clear_tail]; hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0); if (list_empty(&qp->rspwait)) { qp->r_flags |= RVT_R_RSP_SEND; rvt_get_qp(qp); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } } /* * Handle the KDETH eflags for TID RDMA READ response. * * Return true if the last packet for a segment has been received and it is * time to process the response normally; otherwise, return true. * * The caller must hold the packet->qp->r_lock and the rcu_read_lock. */ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, struct hfi1_packet *packet, u8 rcv_type, u8 rte, u32 psn, u32 ibpsn) __must_hold(&packet->qp->r_lock) __must_hold(RCU) { struct hfi1_pportdata *ppd = rcd->ppd; struct hfi1_devdata *dd = ppd->dd; struct hfi1_ibport *ibp; struct rvt_swqe *wqe; struct tid_rdma_request *req; struct tid_rdma_flow *flow; u32 ack_psn; struct rvt_qp *qp = packet->qp; struct hfi1_qp_priv *priv = qp->priv; bool ret = true; int diff = 0; u32 fpsn; lockdep_assert_held(&qp->r_lock); trace_hfi1_rsp_read_kdeth_eflags(qp, ibpsn); trace_hfi1_sender_read_kdeth_eflags(qp); trace_hfi1_tid_read_sender_kdeth_eflags(qp, 0); spin_lock(&qp->s_lock); /* If the psn is out of valid range, drop the packet */ if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || cmp_psn(ibpsn, qp->s_psn) > 0) goto s_unlock; /* * Note that NAKs implicitly ACK outstanding SEND and RDMA write * requests and implicitly NAK RDMA read and atomic requests issued * before the NAK'ed request. */ ack_psn = ibpsn - 1; wqe = rvt_get_swqe_ptr(qp, qp->s_acked); ibp = to_iport(qp->ibqp.device, qp->port_num); /* Complete WQEs that the PSN finishes. */ while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) { /* * If this request is a RDMA read or atomic, and the NACK is * for a later operation, this NACK NAKs the RDMA read or * atomic. 
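 *
 * For IB_WR_TID_RDMA_READ the restart goes through
 * restart_tid_rdma_read_req(), which rewinds to the IB PSN of the
 * flow at clear_tail rather than to s_last_psn + 1.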
*/ if (wqe->wr.opcode == IB_WR_RDMA_READ || wqe->wr.opcode == IB_WR_TID_RDMA_READ || wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { /* Retry this request. */ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { qp->r_flags |= RVT_R_RDMAR_SEQ; if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { restart_tid_rdma_read_req(rcd, qp, wqe); } else { hfi1_restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { qp->r_flags |= RVT_R_RSP_SEND; rvt_get_qp(qp); list_add_tail(/* wait */ &qp->rspwait, &rcd->qp_wait_list); } } } /* * No need to process the NAK since we are * restarting an earlier request. */ break; } wqe = do_rc_completion(qp, wqe, ibp); if (qp->s_acked == qp->s_tail) goto s_unlock; } if (qp->s_acked == qp->s_tail) goto s_unlock; /* Handle the eflags for the request */ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) goto s_unlock; req = wqe_to_tid_req(wqe); trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); switch (rcv_type) { case RHF_RCV_TYPE_EXPECTED: switch (rte) { case RHF_RTE_EXPECTED_FLOW_SEQ_ERR: /* * On the first occurrence of a Flow Sequence error, * the flag TID_FLOW_SW_PSN is set. * * After that, the flow is *not* reprogrammed and the * protocol falls back to SW PSN checking. This is done * to prevent continuous Flow Sequence errors for any * packets that could be still in the fabric. */ flow = &req->flows[req->clear_tail]; trace_hfi1_tid_flow_read_kdeth_eflags(qp, req->clear_tail, flow); if (priv->s_flags & HFI1_R_TID_SW_PSN) { diff = cmp_psn(psn, flow->flow_state.r_next_psn); if (diff > 0) { /* Drop the packet.*/ goto s_unlock; } else if (diff < 0) { /* * If a response packet for a restarted * request has come back, reset the * restart flag. */ if (qp->r_flags & RVT_R_RDMAR_SEQ) qp->r_flags &= ~RVT_R_RDMAR_SEQ; /* Drop the packet.*/ goto s_unlock; } /* * If SW PSN verification is successful and * this is the last packet in the segment, tell * the caller to process it as a normal packet. */ fpsn = full_flow_psn(flow, flow->flow_state.lpsn); if (cmp_psn(fpsn, psn) == 0) { ret = false; if (qp->r_flags & RVT_R_RDMAR_SEQ) qp->r_flags &= ~RVT_R_RDMAR_SEQ; } flow->flow_state.r_next_psn = mask_psn(psn + 1); } else { u32 last_psn; last_psn = read_r_next_psn(dd, rcd->ctxt, flow->idx); flow->flow_state.r_next_psn = last_psn; priv->s_flags |= HFI1_R_TID_SW_PSN; /* * If no request has been restarted yet, * restart the current one. */ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) restart_tid_rdma_read_req(rcd, qp, wqe); } break; case RHF_RTE_EXPECTED_FLOW_GEN_ERR: /* * Since the TID flow is able to ride through * generation mismatch, drop this stale packet. 
*/ break; default: break; } break; case RHF_RCV_TYPE_ERROR: switch (rte) { case RHF_RTE_ERROR_OP_CODE_ERR: case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR: case RHF_RTE_ERROR_KHDR_HCRC_ERR: case RHF_RTE_ERROR_KHDR_KVER_ERR: case RHF_RTE_ERROR_CONTEXT_ERR: case RHF_RTE_ERROR_KHDR_TID_ERR: default: break; } break; default: break; } s_unlock: spin_unlock(&qp->s_lock); return ret; } bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, struct hfi1_packet *packet) { struct hfi1_ibport *ibp = &ppd->ibport_data; struct hfi1_devdata *dd = ppd->dd; struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; u8 rcv_type = rhf_rcv_type(packet->rhf); u8 rte = rhf_rcv_type_err(packet->rhf); struct ib_header *hdr = packet->hdr; struct ib_other_headers *ohdr = NULL; int lnh = be16_to_cpu(hdr->lrh[0]) & 3; u16 lid = be16_to_cpu(hdr->lrh[1]); u8 opcode; u32 qp_num, psn, ibpsn; struct rvt_qp *qp; struct hfi1_qp_priv *qpriv; unsigned long flags; bool ret = true; struct rvt_ack_entry *e; struct tid_rdma_request *req; struct tid_rdma_flow *flow; int diff = 0; trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ", packet->rhf); if (packet->rhf & RHF_ICRC_ERR) return ret; packet->ohdr = &hdr->u.oth; ohdr = packet->ohdr; trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); /* Get the destination QP number. */ qp_num = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_qp) & RVT_QPN_MASK; if (lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) goto drop; psn = mask_psn(be32_to_cpu(ohdr->bth[2])); opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; rcu_read_lock(); qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); if (!qp) goto rcu_unlock; packet->qp = qp; /* Check for valid receive state. */ spin_lock_irqsave(&qp->r_lock, flags); if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { ibp->rvp.n_pkt_drops++; goto r_unlock; } if (packet->rhf & RHF_TID_ERR) { /* For TIDERR and RC QPs preemptively schedule a NAK */ u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */ /* Sanity check packet */ if (tlen < 24) goto r_unlock; /* * Check for GRH. We should never get packets with GRH in this * path. */ if (lnh == HFI1_LRH_GRH) goto r_unlock; if (tid_rdma_tid_err(packet, rcv_type)) goto r_unlock; } /* handle TID RDMA READ */ if (opcode == TID_OP(READ_RESP)) { ibpsn = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn); ibpsn = mask_psn(ibpsn); ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn, ibpsn); goto r_unlock; } /* * qp->s_tail_ack_queue points to the rvt_ack_entry currently being * processed. These a completed sequentially so we can be sure that * the pointer will not change until the entire request has completed. 
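 *
 * Only errors against a TID RDMA WRITE request in the ack queue
 * are handled past this point; KDETH errors on READ RESP packets
 * were already dispatched to handle_read_kdeth_eflags() above.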
*/ spin_lock(&qp->s_lock); qpriv = qp->priv; if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID || qpriv->r_tid_tail == qpriv->r_tid_head) goto unlock; e = &qp->s_ack_queue[qpriv->r_tid_tail]; if (e->opcode != TID_OP(WRITE_REQ)) goto unlock; req = ack_to_tid_req(e); if (req->comp_seg == req->cur_seg) goto unlock; flow = &req->flows[req->clear_tail]; trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn); trace_hfi1_rsp_handle_kdeth_eflags(qp, psn); trace_hfi1_tid_write_rsp_handle_kdeth_eflags(qp); trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn, e->lpsn, req); trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow); switch (rcv_type) { case RHF_RCV_TYPE_EXPECTED: switch (rte) { case RHF_RTE_EXPECTED_FLOW_SEQ_ERR: if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) { qpriv->s_flags |= HFI1_R_TID_SW_PSN; flow->flow_state.r_next_psn = read_r_next_psn(dd, rcd->ctxt, flow->idx); qpriv->r_next_psn_kdeth = flow->flow_state.r_next_psn; goto nak_psn; } else { /* * If the received PSN does not match the next * expected PSN, NAK the packet. * However, only do that if we know that the a * NAK has already been sent. Otherwise, this * mismatch could be due to packets that were * already in flight. */ diff = cmp_psn(psn, flow->flow_state.r_next_psn); if (diff > 0) goto nak_psn; else if (diff < 0) break; qpriv->s_nak_state = 0; /* * If SW PSN verification is successful and this * is the last packet in the segment, tell the * caller to process it as a normal packet. */ if (psn == full_flow_psn(flow, flow->flow_state.lpsn)) ret = false; flow->flow_state.r_next_psn = mask_psn(psn + 1); qpriv->r_next_psn_kdeth = flow->flow_state.r_next_psn; } break; case RHF_RTE_EXPECTED_FLOW_GEN_ERR: goto nak_psn; default: break; } break; case RHF_RCV_TYPE_ERROR: switch (rte) { case RHF_RTE_ERROR_OP_CODE_ERR: case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR: case RHF_RTE_ERROR_KHDR_HCRC_ERR: case RHF_RTE_ERROR_KHDR_KVER_ERR: case RHF_RTE_ERROR_CONTEXT_ERR: case RHF_RTE_ERROR_KHDR_TID_ERR: default: break; } break; default: break; } unlock: spin_unlock(&qp->s_lock); r_unlock: spin_unlock_irqrestore(&qp->r_lock, flags); rcu_unlock: rcu_read_unlock(); drop: return ret; nak_psn: ibp->rvp.n_rc_seqnak++; if (!qpriv->s_nak_state) { qpriv->s_nak_state = IB_NAK_PSN_ERROR; /* We are NAK'ing the next expected PSN */ qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn); tid_rdma_trigger_ack(qp); } goto unlock; } /* * "Rewind" the TID request information. * This means that we reset the state back to ACTIVE, * find the proper flow, set the flow index to that flow, * and reset the flow information. */ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 *bth2) { struct tid_rdma_request *req = wqe_to_tid_req(wqe); struct tid_rdma_flow *flow; struct hfi1_qp_priv *qpriv = qp->priv; int diff, delta_pkts; u32 tididx = 0, i; u16 fidx; if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { *bth2 = mask_psn(qp->s_psn); flow = find_flow_ib(req, *bth2, &fidx); if (!flow) { trace_hfi1_msg_tid_restart_req(/* msg */ qp, "!!!!!! 
Could not find flow to restart: bth2 ", (u64)*bth2); trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); return; } } else { fidx = req->acked_tail; flow = &req->flows[fidx]; *bth2 = mask_psn(req->r_ack_psn); } if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn); else delta_pkts = delta_psn(*bth2, full_flow_psn(flow, flow->flow_state.spsn)); trace_hfi1_tid_flow_restart_req(qp, fidx, flow); diff = delta_pkts + flow->resync_npkts; flow->sent = 0; flow->pkt = 0; flow->tid_idx = 0; flow->tid_offset = 0; if (diff) { for (tididx = 0; tididx < flow->tidcnt; tididx++) { u32 tidentry = flow->tid_entry[tididx], tidlen, tidnpkts, npkts; flow->tid_offset = 0; tidlen = EXP_TID_GET(tidentry, LEN) * PAGE_SIZE; tidnpkts = rvt_div_round_up_mtu(qp, tidlen); npkts = min_t(u32, diff, tidnpkts); flow->pkt += npkts; flow->sent += (npkts == tidnpkts ? tidlen : npkts * qp->pmtu); flow->tid_offset += npkts * qp->pmtu; diff -= npkts; if (!diff) break; } } if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) { rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) + flow->sent, 0); /* * Packet PSN is based on flow_state.spsn + flow->pkt. However, * during a RESYNC, the generation is incremented and the * sequence is reset to 0. Since we've adjusted the npkts in the * flow and the SGE has been sufficiently advanced, we have to * adjust flow->pkt in order to calculate the correct PSN. */ flow->pkt -= flow->resync_npkts; } if (flow->tid_offset == EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) { tididx++; flow->tid_offset = 0; } flow->tid_idx = tididx; if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) /* Move flow_idx to correct index */ req->flow_idx = fidx; else req->clear_tail = fidx; trace_hfi1_tid_flow_restart_req(qp, fidx, flow); trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); req->state = TID_REQUEST_ACTIVE; if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) { /* Reset all the flows that we are going to resend */ fidx = CIRC_NEXT(fidx, MAX_FLOWS); i = qpriv->s_tid_tail; do { for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS); fidx = CIRC_NEXT(fidx, MAX_FLOWS)) { req->flows[fidx].sent = 0; req->flows[fidx].pkt = 0; req->flows[fidx].tid_idx = 0; req->flows[fidx].tid_offset = 0; req->flows[fidx].resync_npkts = 0; } if (i == qpriv->s_tid_cur) break; do { i = (++i == qp->s_size ? 0 : i); wqe = rvt_get_swqe_ptr(qp, i); } while (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE); req = wqe_to_tid_req(wqe); req->cur_seg = req->ack_seg; fidx = req->acked_tail; /* Pull req->clear_tail back */ req->clear_tail = fidx; } while (1); } } void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp) { int i, ret; struct hfi1_qp_priv *qpriv = qp->priv; struct tid_flow_state *fs; if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA)) return; /* * First, clear the flow to help prevent any delayed packets from * being delivered. 
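 *
 * The send queue (TID RDMA READ WQEs) and the ack queue (TID RDMA
 * WRITE entries) are then walked and any TID entries they still
 * own are released through hfi1_kern_exp_rcv_clear().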
*/ fs = &qpriv->flow_state; if (fs->index != RXE_NUM_TID_FLOWS) hfi1_kern_clear_hw_flow(qpriv->rcd, qp); for (i = qp->s_acked; i != qp->s_head;) { struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i); if (++i == qp->s_size) i = 0; /* Free only locally allocated TID entries */ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) continue; do { struct hfi1_swqe_priv *priv = wqe->priv; ret = hfi1_kern_exp_rcv_clear(&priv->tid_req); } while (!ret); } for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) { struct rvt_ack_entry *e = &qp->s_ack_queue[i]; if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device))) i = 0; /* Free only locally allocated TID entries */ if (e->opcode != TID_OP(WRITE_REQ)) continue; do { struct hfi1_ack_priv *priv = e->priv; ret = hfi1_kern_exp_rcv_clear(&priv->tid_req); } while (!ret); } } bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) { struct rvt_swqe *prev; struct hfi1_qp_priv *priv = qp->priv; u32 s_prev; struct tid_rdma_request *req; s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1; prev = rvt_get_swqe_ptr(qp, s_prev); switch (wqe->wr.opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: case IB_WR_SEND_WITH_INV: case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: switch (prev->wr.opcode) { case IB_WR_TID_RDMA_WRITE: req = wqe_to_tid_req(prev); if (req->ack_seg != req->total_segs) goto interlock; break; default: break; } break; case IB_WR_RDMA_READ: if (prev->wr.opcode != IB_WR_TID_RDMA_WRITE) break; fallthrough; case IB_WR_TID_RDMA_READ: switch (prev->wr.opcode) { case IB_WR_RDMA_READ: if (qp->s_acked != qp->s_cur) goto interlock; break; case IB_WR_TID_RDMA_WRITE: req = wqe_to_tid_req(prev); if (req->ack_seg != req->total_segs) goto interlock; break; default: break; } break; default: break; } return false; interlock: priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK; return true; } /* Does @sge meet the alignment requirements for tid rdma? */ static inline bool hfi1_check_sge_align(struct rvt_qp *qp, struct rvt_sge *sge, int num_sge) { int i; for (i = 0; i < num_sge; i++, sge++) { trace_hfi1_sge_check_align(qp, i, sge); if ((u64)sge->vaddr & ~PAGE_MASK || sge->sge_length & ~PAGE_MASK) return false; } return true; } void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe) { struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; struct hfi1_swqe_priv *priv = wqe->priv; struct tid_rdma_params *remote; enum ib_wr_opcode new_opcode; bool do_tid_rdma = false; struct hfi1_pportdata *ppd = qpriv->rcd->ppd; if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) == ppd->lid) return; if (qpriv->hdr_type != HFI1_PKT_TYPE_9B) return; rcu_read_lock(); remote = rcu_dereference(qpriv->tid_rdma.remote); /* * If TID RDMA is disabled by the negotiation, don't * use it. */ if (!remote) goto exit; if (wqe->wr.opcode == IB_WR_RDMA_READ) { if (hfi1_check_sge_align(qp, &wqe->sg_list[0], wqe->wr.num_sge)) { new_opcode = IB_WR_TID_RDMA_READ; do_tid_rdma = true; } } else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { /* * TID RDMA is enabled for this RDMA WRITE request iff: * 1. The remote address is page-aligned, * 2. The length is larger than the minimum segment size, * 3. The length is page-multiple. 
*/ if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) && !(wqe->length & ~PAGE_MASK)) { new_opcode = IB_WR_TID_RDMA_WRITE; do_tid_rdma = true; } } if (do_tid_rdma) { if (hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_ATOMIC)) goto exit; wqe->wr.opcode = new_opcode; priv->tid_req.seg_len = min_t(u32, remote->max_len, wqe->length); priv->tid_req.total_segs = DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len); /* Compute the last PSN of the request */ wqe->lpsn = wqe->psn; if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { priv->tid_req.n_flows = remote->max_read; qpriv->tid_r_reqs++; wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1; } else { wqe->lpsn += priv->tid_req.total_segs - 1; atomic_inc(&qpriv->n_requests); } priv->tid_req.cur_seg = 0; priv->tid_req.comp_seg = 0; priv->tid_req.ack_seg = 0; priv->tid_req.state = TID_REQUEST_INACTIVE; /* * Reset acked_tail. * TID RDMA READ does not have ACKs so it does not * update the pointer. We have to reset it so TID RDMA * WRITE does not get confused. */ priv->tid_req.acked_tail = priv->tid_req.setup_head; trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode, wqe->psn, wqe->lpsn, &priv->tid_req); } exit: rcu_read_unlock(); } /* TID RDMA WRITE functions */ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u32 *len) { struct hfi1_qp_priv *qpriv = qp->priv; struct tid_rdma_request *req = wqe_to_tid_req(wqe); struct tid_rdma_params *remote; rcu_read_lock(); remote = rcu_dereference(qpriv->tid_rdma.remote); /* * Set the number of flow to be used based on negotiated * parameters. */ req->n_flows = remote->max_write; req->state = TID_REQUEST_ACTIVE; KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth0, KVER, 0x1); KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth1, JKEY, remote->jkey); ohdr->u.tid_rdma.w_req.reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len)); ohdr->u.tid_rdma.w_req.reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey); ohdr->u.tid_rdma.w_req.reth.length = cpu_to_be32(*len); ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn); *bth1 &= ~RVT_QPN_MASK; *bth1 |= remote->qp; qp->s_state = TID_OP(WRITE_REQ); qp->s_flags |= HFI1_S_WAIT_TID_RESP; *bth2 |= IB_BTH_REQ_ACK; *len = 0; rcu_read_unlock(); return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32); } static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp) { /* * Heuristic for computing the RNR timeout when waiting on the flow * queue. Rather than a computationaly expensive exact estimate of when * a flow will be available, we assume that if a QP is at position N in * the flow queue it has to wait approximately (N + 1) * (number of * segments between two sync points). The rationale for this is that * flows are released and recycled at each sync point. */ return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT; } static u32 position_in_queue(struct hfi1_qp_priv *qpriv, struct tid_queue *queue) { return qpriv->tid_enqueue - queue->dequeue; } /* * @qp: points to rvt_qp context. * @to_seg: desired RNR timeout in segments. * Return: index of the next highest timeout in the ib_hfi1_rnr_table[] */ static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg) { struct hfi1_qp_priv *qpriv = qp->priv; u64 timeout; u32 bytes_per_us; u8 i; bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8; timeout = (to_seg * TID_RDMA_MAX_SEGMENT_SIZE) / bytes_per_us; /* * Find the next highest value in the RNR table to the required * timeout. This gives the responder some padding. 
*/ for (i = 1; i <= IB_AETH_CREDIT_MASK; i++) if (rvt_rnr_tbl_to_usec(i) >= timeout) return i; return 0; } /* * Central place for resource allocation at TID write responder, * is called from write_req and write_data interrupt handlers as * well as the send thread when a queued QP is scheduled for * resource allocation. * * Iterates over (a) segments of a request and then (b) queued requests * themselves to allocate resources for up to local->max_write * segments across multiple requests. Stop allocating when we * hit a sync point, resume allocating after data packets at * sync point have been received. * * Resource allocation and sending of responses is decoupled. The * request/segment which are being allocated and sent are as follows. * Resources are allocated for: * [request: qpriv->r_tid_alloc, segment: req->alloc_seg] * The send thread sends: * [request: qp->s_tail_ack_queue, segment:req->cur_seg] */ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx) { struct tid_rdma_request *req; struct hfi1_qp_priv *qpriv = qp->priv; struct hfi1_ctxtdata *rcd = qpriv->rcd; struct tid_rdma_params *local = &qpriv->tid_rdma.local; struct rvt_ack_entry *e; u32 npkts, to_seg; bool last; int ret = 0; lockdep_assert_held(&qp->s_lock); while (1) { trace_hfi1_rsp_tid_write_alloc_res(qp, 0); trace_hfi1_tid_write_rsp_alloc_res(qp); /* * Don't allocate more segments if a RNR NAK has already been * scheduled to avoid messing up qp->r_psn: the RNR NAK will * be sent only when all allocated segments have been sent. * However, if more segments are allocated before that, TID RDMA * WRITE RESP packets will be sent out for these new segments * before the RNR NAK packet. When the requester receives the * RNR NAK packet, it will restart with qp->s_last_psn + 1, * which does not match qp->r_psn and will be dropped. * Consequently, the requester will exhaust its retries and * put the qp into error state. */ if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND) break; /* No requests left to process */ if (qpriv->r_tid_alloc == qpriv->r_tid_head) { /* If all data has been received, clear the flow */ if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS && !qpriv->alloc_w_segs) { hfi1_kern_clear_hw_flow(rcd, qp); qpriv->s_flags &= ~HFI1_R_TID_SW_PSN; } break; } e = &qp->s_ack_queue[qpriv->r_tid_alloc]; if (e->opcode != TID_OP(WRITE_REQ)) goto next_req; req = ack_to_tid_req(e); trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn, e->lpsn, req); /* Finished allocating for all segments of this request */ if (req->alloc_seg >= req->total_segs) goto next_req; /* Can allocate only a maximum of local->max_write for a QP */ if (qpriv->alloc_w_segs >= local->max_write) break; /* Don't allocate at a sync point with data packets pending */ if (qpriv->sync_pt && qpriv->alloc_w_segs) break; /* All data received at the sync point, continue */ if (qpriv->sync_pt && !qpriv->alloc_w_segs) { hfi1_kern_clear_hw_flow(rcd, qp); qpriv->sync_pt = false; qpriv->s_flags &= ~HFI1_R_TID_SW_PSN; } /* Allocate flow if we don't have one */ if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) { ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp); if (ret) { to_seg = hfi1_compute_tid_rdma_flow_wt(qp) * position_in_queue(qpriv, &rcd->flow_queue); break; } } npkts = rvt_div_round_up_mtu(qp, req->seg_len); /* * We are at a sync point if we run out of KDETH PSN space. * Last PSN of every generation is reserved for RESYNC. 
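 * Once the sync point is reached, no further segments are allocated
 * until all outstanding data has been received and the hardware flow
 * has been cleared (see the sync_pt checks earlier in this loop).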
*/ if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) { qpriv->sync_pt = true; break; } /* * If overtaking req->acked_tail, send an RNR NAK. Because the * QP is not queued in this case, and the issue can only be * caused by a delay in scheduling the second leg which we * cannot estimate, we use a rather arbitrary RNR timeout of * (MAX_FLOWS / 2) segments */ if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS)) { ret = -EAGAIN; to_seg = MAX_FLOWS >> 1; tid_rdma_trigger_ack(qp); break; } /* Try to allocate rcv array / TID entries */ ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last); if (ret == -EAGAIN) to_seg = position_in_queue(qpriv, &rcd->rarr_queue); if (ret) break; qpriv->alloc_w_segs++; req->alloc_seg++; continue; next_req: /* Begin processing the next request */ if (++qpriv->r_tid_alloc > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) qpriv->r_tid_alloc = 0; } /* * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation * has failed (b) we are called from the rcv handler interrupt context * (c) an RNR NAK has not already been scheduled */ if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state) goto send_rnr_nak; return; send_rnr_nak: lockdep_assert_held(&qp->r_lock); /* Set r_nak_state to prevent unrelated events from generating NAK's */ qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK; /* Pull back r_psn to the segment being RNR NAK'd */ qp->r_psn = e->psn + req->alloc_seg; qp->r_ack_psn = qp->r_psn; /* * Pull back r_head_ack_queue to the ack entry following the request * being RNR NAK'd. This allows resources to be allocated to the request * if the queued QP is scheduled. */ qp->r_head_ack_queue = qpriv->r_tid_alloc + 1; if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) qp->r_head_ack_queue = 0; qpriv->r_tid_head = qp->r_head_ack_queue; /* * These send side fields are used in make_rc_ack(). They are set in * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock * for consistency */ qp->s_nak_state = qp->r_nak_state; qp->s_ack_psn = qp->r_ack_psn; /* * Clear the ACK PENDING flag to prevent unwanted ACK because we * have modified qp->s_ack_psn here. */ qp->s_flags &= ~(RVT_S_ACK_PENDING); trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn); /* * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK * has actually been sent. qp->s_flags RVT_S_ACK_PENDING bit cannot be * used for this because qp->s_lock is dropped before calling * hfi1_send_rc_ack() leading to inconsistency between the receive * interrupt handlers and the send thread in make_rc_ack() */ qpriv->rnr_nak_state = TID_RNR_NAK_SEND; /* * Schedule RNR NAK to be sent. RNR NAK's are scheduled from the receive * interrupt handlers but will be sent from the send engine behind any * previous responses that may have been scheduled */ rc_defered_ack(rcd, qp); } void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet) { /* HANDLER FOR TID RDMA WRITE REQUEST packet (Responder side)*/ /* * 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST * (see hfi1_rc_rcv()) * - Don't allow 0-length requests. * 2. Put TID RDMA WRITE REQ into the response queueu (s_ack_queue) * - Setup struct tid_rdma_req with request info * - Prepare struct tid_rdma_flow array? * 3. Set the qp->s_ack_state as state diagram in design doc. * 4. Set RVT_S_RESP_PENDING in s_flags. * 5. 
Kick the send engine (hfi1_schedule_send()) */ struct hfi1_ctxtdata *rcd = packet->rcd; struct rvt_qp *qp = packet->qp; struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct ib_other_headers *ohdr = packet->ohdr; struct rvt_ack_entry *e; unsigned long flags; struct ib_reth *reth; struct hfi1_qp_priv *qpriv = qp->priv; struct tid_rdma_request *req; u32 bth0, psn, len, rkey, num_segs; bool fecn; u8 next; u64 vaddr; int diff; bth0 = be32_to_cpu(ohdr->bth[0]); if (hfi1_ruc_check_hdr(ibp, packet)) return; fecn = process_ecn(qp, packet); psn = mask_psn(be32_to_cpu(ohdr->bth[2])); trace_hfi1_rsp_rcv_tid_write_req(qp, psn); if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) rvt_comm_est(qp); if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto nack_inv; reth = &ohdr->u.tid_rdma.w_req.reth; vaddr = be64_to_cpu(reth->vaddr); len = be32_to_cpu(reth->length); num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len); diff = delta_psn(psn, qp->r_psn); if (unlikely(diff)) { tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn); return; } /* * The resent request which was previously RNR NAK'd is inserted at the * location of the original request, which is one entry behind * r_head_ack_queue */ if (qpriv->rnr_nak_state) qp->r_head_ack_queue = qp->r_head_ack_queue ? qp->r_head_ack_queue - 1 : rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); /* We've verified the request, insert it into the ack queue. */ next = qp->r_head_ack_queue + 1; if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) next = 0; spin_lock_irqsave(&qp->s_lock, flags); if (unlikely(next == qp->s_acked_ack_queue)) { if (!qp->s_ack_queue[next].sent) goto nack_inv_unlock; update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; req = ack_to_tid_req(e); /* Bring previously RNR NAK'd request back to life */ if (qpriv->rnr_nak_state) { qp->r_nak_state = 0; qp->s_nak_state = 0; qpriv->rnr_nak_state = TID_RNR_NAK_INIT; qp->r_psn = e->lpsn + 1; req->state = TID_REQUEST_INIT; goto update_head; } release_rdma_sge_mr(e); /* The length needs to be in multiples of PAGE_SIZE */ if (!len || len & ~PAGE_MASK) goto nack_inv_unlock; rkey = be32_to_cpu(reth->rkey); qp->r_len = len; if (e->opcode == TID_OP(WRITE_REQ) && (req->setup_head != req->clear_tail || req->clear_tail != req->acked_tail)) goto nack_inv_unlock; if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr, rkey, IB_ACCESS_REMOTE_WRITE))) goto nack_acc; qp->r_psn += num_segs - 1; e->opcode = (bth0 >> 24) & 0xff; e->psn = psn; e->lpsn = qp->r_psn; e->sent = 0; req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write); req->state = TID_REQUEST_INIT; req->cur_seg = 0; req->comp_seg = 0; req->ack_seg = 0; req->alloc_seg = 0; req->isge = 0; req->seg_len = qpriv->tid_rdma.local.max_len; req->total_len = len; req->total_segs = num_segs; req->r_flow_psn = e->psn; req->ss.sge = e->rdma_sge; req->ss.num_sge = 1; req->flow_idx = req->setup_head; req->clear_tail = req->setup_head; req->acked_tail = req->setup_head; qp->r_state = e->opcode; qp->r_nak_state = 0; /* * We need to increment the MSN here instead of when we * finish sending the result since a duplicate request would * increment it more than once. 
*/ qp->r_msn++; qp->r_psn++; trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn, req); if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) { qpriv->r_tid_tail = qp->r_head_ack_queue; } else if (qpriv->r_tid_tail == qpriv->r_tid_head) { struct tid_rdma_request *ptr; e = &qp->s_ack_queue[qpriv->r_tid_tail]; ptr = ack_to_tid_req(e); if (e->opcode != TID_OP(WRITE_REQ) || ptr->comp_seg == ptr->total_segs) { if (qpriv->r_tid_tail == qpriv->r_tid_ack) qpriv->r_tid_ack = qp->r_head_ack_queue; qpriv->r_tid_tail = qp->r_head_ack_queue; } } update_head: qp->r_head_ack_queue = next; qpriv->r_tid_head = qp->r_head_ack_queue; hfi1_tid_write_alloc_resources(qp, true); trace_hfi1_tid_write_rsp_rcv_req(qp); /* Schedule the send tasklet. */ qp->s_flags |= RVT_S_RESP_PENDING; if (fecn) qp->s_flags |= RVT_S_ECN; hfi1_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); return; nack_inv_unlock: spin_unlock_irqrestore(&qp->s_lock, flags); nack_inv: rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qp->r_nak_state = IB_NAK_INVALID_REQUEST; qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ rc_defered_ack(rcd, qp); return; nack_acc: spin_unlock_irqrestore(&qp->s_lock, flags); rvt_rc_error(qp, IB_WC_LOC_PROT_ERR); qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; qp->r_ack_psn = qp->r_psn; } u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e, struct ib_other_headers *ohdr, u32 *bth1, u32 bth2, u32 *len, struct rvt_sge_state **ss) { struct hfi1_ack_priv *epriv = e->priv; struct tid_rdma_request *req = &epriv->tid_req; struct hfi1_qp_priv *qpriv = qp->priv; struct tid_rdma_flow *flow = NULL; u32 resp_len = 0, hdwords = 0; void *resp_addr = NULL; struct tid_rdma_params *remote; trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn, req); trace_hfi1_tid_write_rsp_build_resp(qp); trace_hfi1_rsp_build_tid_write_resp(qp, bth2); flow = &req->flows[req->flow_idx]; switch (req->state) { default: /* * Try to allocate resources here in case QP was queued and was * later scheduled when resources became available */ hfi1_tid_write_alloc_resources(qp, false); /* We've already sent everything which is ready */ if (req->cur_seg >= req->alloc_seg) goto done; /* * Resources can be assigned but responses cannot be sent in * rnr_nak state, till the resent request is received */ if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT) goto done; req->state = TID_REQUEST_ACTIVE; trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS); hfi1_add_tid_reap_timer(qp); break; case TID_REQUEST_RESEND_ACTIVE: case TID_REQUEST_RESEND: trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS); if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS)) req->state = TID_REQUEST_ACTIVE; hfi1_mod_tid_reap_timer(qp); break; } flow->flow_state.resp_ib_psn = bth2; resp_addr = (void *)flow->tid_entry; resp_len = sizeof(*flow->tid_entry) * flow->tidcnt; req->cur_seg++; memset(&ohdr->u.tid_rdma.w_rsp, 0, sizeof(ohdr->u.tid_rdma.w_rsp)); epriv->ss.sge.vaddr = resp_addr; epriv->ss.sge.sge_length = resp_len; epriv->ss.sge.length = epriv->ss.sge.sge_length; /* * We can safely zero these out. Since the first SGE covers the * entire packet, nothing else should even look at the MR. 
*/ epriv->ss.sge.mr = NULL; epriv->ss.sge.m = 0; epriv->ss.sge.n = 0; epriv->ss.sg_list = NULL; epriv->ss.total_len = epriv->ss.sge.sge_length; epriv->ss.num_sge = 1; *ss = &epriv->ss; *len = epriv->ss.total_len; /* Construct the TID RDMA WRITE RESP packet header */ rcu_read_lock(); remote = rcu_dereference(qpriv->tid_rdma.remote); KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth0, KVER, 0x1); KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth1, JKEY, remote->jkey); ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp); ohdr->u.tid_rdma.w_rsp.tid_flow_psn = cpu_to_be32((flow->flow_state.generation << HFI1_KDETH_BTH_SEQ_SHIFT) | (flow->flow_state.spsn & HFI1_KDETH_BTH_SEQ_MASK)); ohdr->u.tid_rdma.w_rsp.tid_flow_qp = cpu_to_be32(qpriv->tid_rdma.local.qp | ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << TID_RDMA_DESTQP_FLOW_SHIFT) | qpriv->rcd->ctxt); ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn); *bth1 = remote->qp; rcu_read_unlock(); hdwords = sizeof(ohdr->u.tid_rdma.w_rsp) / sizeof(u32); qpriv->pending_tid_w_segs++; done: return hdwords; } static void hfi1_add_tid_reap_timer(struct rvt_qp *qp) { struct hfi1_qp_priv *qpriv = qp->priv; lockdep_assert_held(&qp->s_lock); if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) { qpriv->s_flags |= HFI1_R_TID_RSC_TIMER; qpriv->s_tid_timer.expires = jiffies + qpriv->tid_timer_timeout_jiffies; add_timer(&qpriv->s_tid_timer); } } static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp) { struct hfi1_qp_priv *qpriv = qp->priv; lockdep_assert_held(&qp->s_lock); qpriv->s_flags |= HFI1_R_TID_RSC_TIMER; mod_timer(&qpriv->s_tid_timer, jiffies + qpriv->tid_timer_timeout_jiffies); } static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp) { struct hfi1_qp_priv *qpriv = qp->priv; int rval = 0; lockdep_assert_held(&qp->s_lock); if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) { rval = del_timer(&qpriv->s_tid_timer); qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; } return rval; } void hfi1_del_tid_reap_timer(struct rvt_qp *qp) { struct hfi1_qp_priv *qpriv = qp->priv; del_timer_sync(&qpriv->s_tid_timer); qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; } static void hfi1_tid_timeout(struct timer_list *t) { struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer); struct rvt_qp *qp = qpriv->owner; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); unsigned long flags; u32 i; spin_lock_irqsave(&qp->r_lock, flags); spin_lock(&qp->s_lock); if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) { dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n", qp->ibqp.qp_num, __func__, __LINE__); trace_hfi1_msg_tid_timeout(/* msg */ qp, "resource timeout = ", (u64)qpriv->tid_timer_timeout_jiffies); hfi1_stop_tid_reap_timer(qp); /* * Go though the entire ack queue and clear any outstanding * HW flow and RcvArray resources. */ hfi1_kern_clear_hw_flow(qpriv->rcd, qp); for (i = 0; i < rvt_max_atomic(rdi); i++) { struct tid_rdma_request *req = ack_to_tid_req(&qp->s_ack_queue[i]); hfi1_kern_exp_rcv_clear_all(req); } spin_unlock(&qp->s_lock); if (qp->ibqp.event_handler) { struct ib_event ev; ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_QP_FATAL; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR); goto unlock_r_lock; } spin_unlock(&qp->s_lock); unlock_r_lock: spin_unlock_irqrestore(&qp->r_lock, flags); } void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet) { /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requestor side */ /* * 1. Find matching SWQE * 2. Check that TIDENTRY array has enough space for a complete * segment. 
If not, put QP in error state. * 3. Save response data in struct tid_rdma_req and struct tid_rdma_flow * 4. Remove HFI1_S_WAIT_TID_RESP from s_flags. * 5. Set qp->s_state * 6. Kick the send engine (hfi1_schedule_send()) */ struct ib_other_headers *ohdr = packet->ohdr; struct rvt_qp *qp = packet->qp; struct hfi1_qp_priv *qpriv = qp->priv; struct hfi1_ctxtdata *rcd = packet->rcd; struct rvt_swqe *wqe; struct tid_rdma_request *req; struct tid_rdma_flow *flow; enum ib_wc_status status; u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen; bool fecn; unsigned long flags; fecn = process_ecn(qp, packet); psn = mask_psn(be32_to_cpu(ohdr->bth[2])); aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth); opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; spin_lock_irqsave(&qp->s_lock, flags); /* Ignore invalid responses */ if (cmp_psn(psn, qp->s_next_psn) >= 0) goto ack_done; /* Ignore duplicate responses. */ if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0)) goto ack_done; if (unlikely(qp->s_acked == qp->s_tail)) goto ack_done; /* * If we are waiting for a particular packet sequence number * due to a request being resent, check for it. Otherwise, * ensure that we haven't missed anything. */ if (qp->r_flags & RVT_R_RDMAR_SEQ) { if (cmp_psn(psn, qp->s_last_psn + 1) != 0) goto ack_done; qp->r_flags &= ~RVT_R_RDMAR_SEQ; } wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur); if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)) goto ack_op_err; req = wqe_to_tid_req(wqe); /* * If we've lost ACKs and our acked_tail pointer is too far * behind, don't overwrite segments. Just drop the packet and * let the reliability protocol take care of it. */ if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS)) goto ack_done; /* * The call to do_rc_ack() should be last in the chain of * packet checks because it will end up updating the QP state. * Therefore, anything that would prevent the packet from * being accepted as a successful response should be prior * to it. */ if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) goto ack_done; trace_hfi1_ack(qp, psn); flow = &req->flows[req->setup_head]; flow->pkt = 0; flow->tid_idx = 0; flow->tid_offset = 0; flow->sent = 0; flow->resync_npkts = 0; flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp); flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & TID_RDMA_DESTQP_FLOW_MASK; flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_psn)); flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; flow->flow_state.resp_ib_psn = psn; flow->length = min_t(u32, req->seg_len, (wqe->length - (req->comp_seg * req->seg_len))); flow->npkts = rvt_div_round_up_mtu(qp, flow->length); flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; /* payload length = packet length - (header length + ICRC length) */ pktlen = packet->tlen - (packet->hlen + 4); if (pktlen > sizeof(flow->tid_entry)) { status = IB_WC_LOC_LEN_ERR; goto ack_err; } memcpy(flow->tid_entry, packet->ebuf, pktlen); flow->tidcnt = pktlen / sizeof(*flow->tid_entry); trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow); req->comp_seg++; trace_hfi1_tid_write_sender_rcv_resp(qp, 0); /* * Walk the TID_ENTRY list to make sure we have enough space for a * complete segment. 
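 * Every entry must carry a non-zero length and the lengths must add
 * up to at least flow->length; otherwise the QP is put into the
 * error state with IB_WC_LOC_LEN_ERR.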
*/ for (i = 0; i < flow->tidcnt; i++) { trace_hfi1_tid_entry_rcv_write_resp(/* entry */ qp, i, flow->tid_entry[i]); if (!EXP_TID_GET(flow->tid_entry[i], LEN)) { status = IB_WC_LOC_LEN_ERR; goto ack_err; } tidlen += EXP_TID_GET(flow->tid_entry[i], LEN); } if (tidlen * PAGE_SIZE < flow->length) { status = IB_WC_LOC_LEN_ERR; goto ack_err; } trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); /* * If this is the first response for this request, set the initial * flow index to the current flow. */ if (!cmp_psn(psn, wqe->psn)) { req->r_last_acked = mask_psn(wqe->psn - 1); /* Set acked flow index to head index */ req->acked_tail = req->setup_head; } /* advance circular buffer head */ req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS); req->state = TID_REQUEST_ACTIVE; /* * If all responses for this TID RDMA WRITE request have been received * advance the pointer to the next one. * Since TID RDMA requests could be mixed in with regular IB requests, * they might not appear sequentially in the queue. Therefore, the * next request needs to be "found". */ if (qpriv->s_tid_cur != qpriv->s_tid_head && req->comp_seg == req->total_segs) { for (i = qpriv->s_tid_cur + 1; ; i++) { if (i == qp->s_size) i = 0; wqe = rvt_get_swqe_ptr(qp, i); if (i == qpriv->s_tid_head) break; if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) break; } qpriv->s_tid_cur = i; } qp->s_flags &= ~HFI1_S_WAIT_TID_RESP; hfi1_schedule_tid_send(qp); goto ack_done; ack_op_err: status = IB_WC_LOC_QP_OP_ERR; ack_err: rvt_error_qp(qp, status); ack_done: if (fecn) qp->s_flags |= RVT_S_ECN; spin_unlock_irqrestore(&qp->s_lock, flags); } bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u32 *len) { struct tid_rdma_request *req = wqe_to_tid_req(wqe); struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; struct tid_rdma_params *remote; struct rvt_qp *qp = req->qp; struct hfi1_qp_priv *qpriv = qp->priv; u32 tidentry = flow->tid_entry[flow->tid_idx]; u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT; struct tid_rdma_write_data *wd = &ohdr->u.tid_rdma.w_data; u32 next_offset, om = KDETH_OM_LARGE; bool last_pkt; if (!tidlen) { hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR); rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR); } *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); flow->sent += *len; next_offset = flow->tid_offset + *len; last_pkt = (flow->tid_idx == (flow->tidcnt - 1) && next_offset >= tidlen) || (flow->sent >= flow->length); trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry); trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow); rcu_read_lock(); remote = rcu_dereference(qpriv->tid_rdma.remote); KDETH_RESET(wd->kdeth0, KVER, 0x1); KDETH_SET(wd->kdeth0, SH, !last_pkt); KDETH_SET(wd->kdeth0, INTR, !!(!last_pkt && remote->urg)); KDETH_SET(wd->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL)); KDETH_SET(wd->kdeth0, TID, EXP_TID_GET(tidentry, IDX)); KDETH_SET(wd->kdeth0, OM, om == KDETH_OM_LARGE); KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om); KDETH_RESET(wd->kdeth1, JKEY, remote->jkey); wd->verbs_qp = cpu_to_be32(qp->remote_qpn); rcu_read_unlock(); *bth1 = flow->tid_qpn; *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & HFI1_KDETH_BTH_SEQ_MASK) | (flow->flow_state.generation << HFI1_KDETH_BTH_SEQ_SHIFT)); if (last_pkt) { /* PSNs are zero-based, so +1 to count number of packets */ if (flow->flow_state.lpsn + 1 + rvt_div_round_up_mtu(qp, req->seg_len) > MAX_TID_FLOW_PSN) req->state = TID_REQUEST_SYNC; 
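/*
 * If the next segment would overflow the KDETH PSN space, mark the
 * request as having reached a sync point. The last packet of every
 * segment also solicits a TID ACK.
 */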
*bth2 |= IB_BTH_REQ_ACK; } if (next_offset >= tidlen) { flow->tid_offset = 0; flow->tid_idx++; } else { flow->tid_offset = next_offset; } return last_pkt; } void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet) { struct rvt_qp *qp = packet->qp; struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ctxtdata *rcd = priv->rcd; struct ib_other_headers *ohdr = packet->ohdr; struct rvt_ack_entry *e; struct tid_rdma_request *req; struct tid_rdma_flow *flow; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); unsigned long flags; u32 psn, next; u8 opcode; bool fecn; fecn = process_ecn(qp, packet); psn = mask_psn(be32_to_cpu(ohdr->bth[2])); opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; /* * All error handling should be done by now. If we are here, the packet * is either good or been accepted by the error handler. */ spin_lock_irqsave(&qp->s_lock, flags); e = &qp->s_ack_queue[priv->r_tid_tail]; req = ack_to_tid_req(e); flow = &req->flows[req->clear_tail]; if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) { update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); if (cmp_psn(psn, flow->flow_state.r_next_psn)) goto send_nak; flow->flow_state.r_next_psn = mask_psn(psn + 1); /* * Copy the payload to destination buffer if this packet is * delivered as an eager packet due to RSM rule and FECN. * The RSM rule selects FECN bit in BTH and SH bit in * KDETH header and therefore will not match the last * packet of each segment that has SH bit cleared. */ if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) { struct rvt_sge_state ss; u32 len; u32 tlen = packet->tlen; u16 hdrsize = packet->hlen; u8 pad = packet->pad; u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2); u32 pmtu = qp->pmtu; if (unlikely(tlen != (hdrsize + pmtu + extra_bytes))) goto send_nak; len = req->comp_seg * req->seg_len; len += delta_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) * pmtu; if (unlikely(req->total_len - len < pmtu)) goto send_nak; /* * The e->rdma_sge field is set when TID RDMA WRITE REQ * is first received and is never modified thereafter. */ ss.sge = e->rdma_sge; ss.sg_list = NULL; ss.num_sge = 1; ss.total_len = req->total_len; rvt_skip_sge(&ss, len, false); rvt_copy_sge(qp, &ss, packet->payload, pmtu, false, false); /* Raise the sw sequence check flag for next packet */ priv->r_next_psn_kdeth = mask_psn(psn + 1); priv->s_flags |= HFI1_R_TID_SW_PSN; } goto exit; } flow->flow_state.r_next_psn = mask_psn(psn + 1); hfi1_kern_exp_rcv_clear(req); priv->alloc_w_segs--; rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK; req->comp_seg++; priv->s_nak_state = 0; /* * Release the flow if one of the following conditions has been met: * - The request has reached a sync point AND all outstanding * segments have been completed, or * - The entire request is complete and there are no more requests * (of any kind) in the queue. */ trace_hfi1_rsp_rcv_tid_write_data(qp, psn); trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn, req); trace_hfi1_tid_write_rsp_rcv_data(qp); validate_r_tid_ack(priv); if (opcode == TID_OP(WRITE_DATA_LAST)) { release_rdma_sge_mr(e); for (next = priv->r_tid_tail + 1; ; next++) { if (next > rvt_size_atomic(&dev->rdi)) next = 0; if (next == priv->r_tid_head) break; e = &qp->s_ack_queue[next]; if (e->opcode == TID_OP(WRITE_REQ)) break; } priv->r_tid_tail = next; if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi)) qp->s_acked_ack_queue = 0; } hfi1_tid_write_alloc_resources(qp, true); /* * If we need to generate more responses, schedule the * send engine. 
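 * More responses are needed when this request still has segments
 * without a TID RDMA WRITE RESP (cur_seg < total_segs) or when
 * further entries are waiting in the ack queue.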
*/ if (req->cur_seg < req->total_segs || qp->s_tail_ack_queue != qp->r_head_ack_queue) { qp->s_flags |= RVT_S_RESP_PENDING; hfi1_schedule_send(qp); } priv->pending_tid_w_segs--; if (priv->s_flags & HFI1_R_TID_RSC_TIMER) { if (priv->pending_tid_w_segs) hfi1_mod_tid_reap_timer(req->qp); else hfi1_stop_tid_reap_timer(req->qp); } done: tid_rdma_schedule_ack(qp); exit: priv->r_next_psn_kdeth = flow->flow_state.r_next_psn; if (fecn) qp->s_flags |= RVT_S_ECN; spin_unlock_irqrestore(&qp->s_lock, flags); return; send_nak: if (!priv->s_nak_state) { priv->s_nak_state = IB_NAK_PSN_ERROR; priv->s_nak_psn = flow->flow_state.r_next_psn; tid_rdma_trigger_ack(qp); } goto done; } static bool hfi1_tid_rdma_is_resync_psn(u32 psn) { return (bool)((psn & HFI1_KDETH_BTH_SEQ_MASK) == HFI1_KDETH_BTH_SEQ_MASK); } u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e, struct ib_other_headers *ohdr, u16 iflow, u32 *bth1, u32 *bth2) { struct hfi1_qp_priv *qpriv = qp->priv; struct tid_flow_state *fs = &qpriv->flow_state; struct tid_rdma_request *req = ack_to_tid_req(e); struct tid_rdma_flow *flow = &req->flows[iflow]; struct tid_rdma_params *remote; rcu_read_lock(); remote = rcu_dereference(qpriv->tid_rdma.remote); KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey); ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn); *bth1 = remote->qp; rcu_read_unlock(); if (qpriv->resync) { *bth2 = mask_psn((fs->generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1); ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp); } else if (qpriv->s_nak_state) { *bth2 = mask_psn(qpriv->s_nak_psn); ohdr->u.tid_rdma.ack.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) | (qpriv->s_nak_state << IB_AETH_CREDIT_SHIFT)); } else { *bth2 = full_flow_psn(flow, flow->flow_state.lpsn); ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp); } KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1); ohdr->u.tid_rdma.ack.tid_flow_qp = cpu_to_be32(qpriv->tid_rdma.local.qp | ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << TID_RDMA_DESTQP_FLOW_SHIFT) | qpriv->rcd->ctxt); ohdr->u.tid_rdma.ack.tid_flow_psn = 0; ohdr->u.tid_rdma.ack.verbs_psn = cpu_to_be32(flow->flow_state.resp_ib_psn); if (qpriv->resync) { /* * If the PSN before the current expect KDETH PSN is the * RESYNC PSN, then we never received a good TID RDMA WRITE * DATA packet after a previous RESYNC. * In this case, the next expected KDETH PSN stays the same. */ if (hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1)) { ohdr->u.tid_rdma.ack.tid_flow_psn = cpu_to_be32(qpriv->r_next_psn_kdeth_save); } else { /* * Because the KDETH PSNs jump during a RESYNC, it's * not possible to infer (or compute) the previous value * of r_next_psn_kdeth in the case of back-to-back * RESYNC packets. Therefore, we save it. 
*/ qpriv->r_next_psn_kdeth_save = qpriv->r_next_psn_kdeth - 1; ohdr->u.tid_rdma.ack.tid_flow_psn = cpu_to_be32(qpriv->r_next_psn_kdeth_save); qpriv->r_next_psn_kdeth = mask_psn(*bth2 + 1); } qpriv->resync = false; } return sizeof(ohdr->u.tid_rdma.ack) / sizeof(u32); } void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) { struct ib_other_headers *ohdr = packet->ohdr; struct rvt_qp *qp = packet->qp; struct hfi1_qp_priv *qpriv = qp->priv; struct rvt_swqe *wqe; struct tid_rdma_request *req; struct tid_rdma_flow *flow; u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn; unsigned long flags; u16 fidx; trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0); process_ecn(qp, packet); psn = mask_psn(be32_to_cpu(ohdr->bth[2])); aeth = be32_to_cpu(ohdr->u.tid_rdma.ack.aeth); req_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.verbs_psn)); resync_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.tid_flow_psn)); spin_lock_irqsave(&qp->s_lock, flags); trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn); /* If we are waiting for an ACK to RESYNC, drop any other packets */ if ((qp->s_flags & HFI1_S_WAIT_HALT) && cmp_psn(psn, qpriv->s_resync_psn)) goto ack_op_err; ack_psn = req_psn; if (hfi1_tid_rdma_is_resync_psn(psn)) ack_kpsn = resync_psn; else ack_kpsn = psn; if (aeth >> 29) { ack_psn--; ack_kpsn--; } if (unlikely(qp->s_acked == qp->s_tail)) goto ack_op_err; wqe = rvt_get_swqe_ptr(qp, qp->s_acked); if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) goto ack_op_err; req = wqe_to_tid_req(wqe); trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); flow = &req->flows[req->acked_tail]; trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); /* Drop stale ACK/NAK */ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 || cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0) goto ack_op_err; while (cmp_psn(ack_kpsn, full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 && req->ack_seg < req->cur_seg) { req->ack_seg++; /* advance acked segment pointer */ req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS); req->r_last_acked = flow->flow_state.resp_ib_psn; trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); if (req->ack_seg == req->total_segs) { req->state = TID_REQUEST_COMPLETE; wqe = do_rc_completion(qp, wqe, to_iport(qp->ibqp.device, qp->port_num)); trace_hfi1_sender_rcv_tid_ack(qp); atomic_dec(&qpriv->n_tid_requests); if (qp->s_acked == qp->s_tail) break; if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) break; req = wqe_to_tid_req(wqe); } flow = &req->flows[req->acked_tail]; trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); } trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); switch (aeth >> 29) { case 0: /* ACK */ if (qpriv->s_flags & RVT_S_WAIT_ACK) qpriv->s_flags &= ~RVT_S_WAIT_ACK; if (!hfi1_tid_rdma_is_resync_psn(psn)) { /* Check if there is any pending TID ACK */ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && req->ack_seg < req->cur_seg) hfi1_mod_tid_retry_timer(qp); else hfi1_stop_tid_retry_timer(qp); hfi1_schedule_send(qp); } else { u32 spsn, fpsn, last_acked, generation; struct tid_rdma_request *rptr; /* ACK(RESYNC) */ hfi1_stop_tid_retry_timer(qp); /* Allow new requests (see hfi1_make_tid_rdma_pkt) */ qp->s_flags &= ~HFI1_S_WAIT_HALT; /* * Clear RVT_S_SEND_ONE flag in case that the TID RDMA * ACK is received after the TID retry timer is fired * again. In this case, do not send any more TID * RESYNC request or wait for any more TID ACK packet. 
*/ qpriv->s_flags &= ~RVT_S_SEND_ONE; hfi1_schedule_send(qp); if ((qp->s_acked == qpriv->s_tid_tail && req->ack_seg == req->total_segs) || qp->s_acked == qp->s_tail) { qpriv->s_state = TID_OP(WRITE_DATA_LAST); goto done; } if (req->ack_seg == req->comp_seg) { qpriv->s_state = TID_OP(WRITE_DATA); goto done; } /* * The PSN to start with is the next PSN after the * RESYNC PSN. */ psn = mask_psn(psn + 1); generation = psn >> HFI1_KDETH_BTH_SEQ_SHIFT; spsn = 0; /* * Update to the correct WQE when we get an ACK(RESYNC) * in the middle of a request. */ if (delta_psn(ack_psn, wqe->lpsn)) wqe = rvt_get_swqe_ptr(qp, qp->s_acked); req = wqe_to_tid_req(wqe); flow = &req->flows[req->acked_tail]; /* * RESYNC re-numbers the PSN ranges of all remaining * segments. Also, PSN's start from 0 in the middle of a * segment and the first segment size is less than the * default number of packets. flow->resync_npkts is used * to track the number of packets from the start of the * real segment to the point of 0 PSN after the RESYNC * in order to later correctly rewind the SGE. */ fpsn = full_flow_psn(flow, flow->flow_state.spsn); req->r_ack_psn = psn; /* * If resync_psn points to the last flow PSN for a * segment and the new segment (likely from a new * request) starts with a new generation number, we * need to adjust resync_psn accordingly. */ if (flow->flow_state.generation != (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT)) resync_psn = mask_psn(fpsn - 1); flow->resync_npkts += delta_psn(mask_psn(resync_psn + 1), fpsn); /* * Renumber all packet sequence number ranges * based on the new generation. */ last_acked = qp->s_acked; rptr = req; while (1) { /* start from last acked segment */ for (fidx = rptr->acked_tail; CIRC_CNT(rptr->setup_head, fidx, MAX_FLOWS); fidx = CIRC_NEXT(fidx, MAX_FLOWS)) { u32 lpsn; u32 gen; flow = &rptr->flows[fidx]; gen = flow->flow_state.generation; if (WARN_ON(gen == generation && flow->flow_state.spsn != spsn)) continue; lpsn = flow->flow_state.lpsn; lpsn = full_flow_psn(flow, lpsn); flow->npkts = delta_psn(lpsn, mask_psn(resync_psn) ); flow->flow_state.generation = generation; flow->flow_state.spsn = spsn; flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; flow->pkt = 0; spsn += flow->npkts; resync_psn += flow->npkts; trace_hfi1_tid_flow_rcv_tid_ack(qp, fidx, flow); } if (++last_acked == qpriv->s_tid_cur + 1) break; if (last_acked == qp->s_size) last_acked = 0; wqe = rvt_get_swqe_ptr(qp, last_acked); rptr = wqe_to_tid_req(wqe); } req->cur_seg = req->ack_seg; qpriv->s_tid_tail = qp->s_acked; qpriv->s_state = TID_OP(WRITE_REQ); hfi1_schedule_tid_send(qp); } done: qpriv->s_retry = qp->s_retry_cnt; break; case 3: /* NAK */ hfi1_stop_tid_retry_timer(qp); switch ((aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK) { case 0: /* PSN sequence error */ if (!req->flows) break; flow = &req->flows[req->acked_tail]; flpsn = full_flow_psn(flow, flow->flow_state.lpsn); if (cmp_psn(psn, flpsn) > 0) break; trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2])); req->cur_seg = req->ack_seg; qpriv->s_tid_tail = qp->s_acked; qpriv->s_state = TID_OP(WRITE_REQ); qpriv->s_retry = qp->s_retry_cnt; hfi1_schedule_tid_send(qp); break; default: break; } break; default: break; } ack_op_err: spin_unlock_irqrestore(&qp->s_lock, flags); } void hfi1_add_tid_retry_timer(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct ib_qp *ibqp = &qp->ibqp; struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); lockdep_assert_held(&qp->s_lock); if 
(!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) { priv->s_flags |= HFI1_S_TID_RETRY_TIMER; priv->s_tid_retry_timer.expires = jiffies + priv->tid_retry_timeout_jiffies + rdi->busy_jiffies; add_timer(&priv->s_tid_retry_timer); } } static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct ib_qp *ibqp = &qp->ibqp; struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); lockdep_assert_held(&qp->s_lock); priv->s_flags |= HFI1_S_TID_RETRY_TIMER; mod_timer(&priv->s_tid_retry_timer, jiffies + priv->tid_retry_timeout_jiffies + rdi->busy_jiffies); } static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; int rval = 0; lockdep_assert_held(&qp->s_lock); if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) { rval = del_timer(&priv->s_tid_retry_timer); priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER; } return rval; } void hfi1_del_tid_retry_timer(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; del_timer_sync(&priv->s_tid_retry_timer); priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER; } static void hfi1_tid_retry_timeout(struct timer_list *t) { struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer); struct rvt_qp *qp = priv->owner; struct rvt_swqe *wqe; unsigned long flags; struct tid_rdma_request *req; spin_lock_irqsave(&qp->r_lock, flags); spin_lock(&qp->s_lock); trace_hfi1_tid_write_sender_retry_timeout(qp, 0); if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) { hfi1_stop_tid_retry_timer(qp); if (!priv->s_retry) { trace_hfi1_msg_tid_retry_timeout(/* msg */ qp, "Exhausted retries. Tid retry timeout = ", (u64)priv->tid_retry_timeout_jiffies); wqe = rvt_get_swqe_ptr(qp, qp->s_acked); hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } else { wqe = rvt_get_swqe_ptr(qp, qp->s_acked); req = wqe_to_tid_req(wqe); trace_hfi1_tid_req_tid_retry_timeout(/* req */ qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); priv->s_flags &= ~RVT_S_WAIT_ACK; /* Only send one packet (the RESYNC) */ priv->s_flags |= RVT_S_SEND_ONE; /* * No additional request shall be made by this QP until * the RESYNC has been complete. 
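 * HFI1_S_WAIT_HALT is cleared again in hfi1_rc_rcv_tid_rdma_ack()
 * once the ACK(RESYNC) has been received.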
*/ qp->s_flags |= HFI1_S_WAIT_HALT; priv->s_state = TID_OP(RESYNC); priv->s_retry--; hfi1_schedule_tid_send(qp); } } spin_unlock(&qp->s_lock); spin_unlock_irqrestore(&qp->r_lock, flags); } u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u16 fidx) { struct hfi1_qp_priv *qpriv = qp->priv; struct tid_rdma_params *remote; struct tid_rdma_request *req = wqe_to_tid_req(wqe); struct tid_rdma_flow *flow = &req->flows[fidx]; u32 generation; rcu_read_lock(); remote = rcu_dereference(qpriv->tid_rdma.remote); KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey); ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn); *bth1 = remote->qp; rcu_read_unlock(); generation = kern_flow_generation_next(flow->flow_state.generation); *bth2 = mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1); qpriv->s_resync_psn = *bth2; *bth2 |= IB_BTH_REQ_ACK; KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1); return sizeof(ohdr->u.tid_rdma.resync) / sizeof(u32); } void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet) { struct ib_other_headers *ohdr = packet->ohdr; struct rvt_qp *qp = packet->qp; struct hfi1_qp_priv *qpriv = qp->priv; struct hfi1_ctxtdata *rcd = qpriv->rcd; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); struct rvt_ack_entry *e; struct tid_rdma_request *req; struct tid_rdma_flow *flow; struct tid_flow_state *fs = &qpriv->flow_state; u32 psn, generation, idx, gen_next; bool fecn; unsigned long flags; fecn = process_ecn(qp, packet); psn = mask_psn(be32_to_cpu(ohdr->bth[2])); generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT; spin_lock_irqsave(&qp->s_lock, flags); gen_next = (fs->generation == KERN_GENERATION_RESERVED) ? generation : kern_flow_generation_next(fs->generation); /* * RESYNC packet contains the "next" generation and can only be * from the current or previous generations */ if (generation != mask_generation(gen_next - 1) && generation != gen_next) goto bail; /* Already processing a resync */ if (qpriv->resync) goto bail; spin_lock(&rcd->exp_lock); if (fs->index >= RXE_NUM_TID_FLOWS) { /* * If we don't have a flow, save the generation so it can be * applied when a new flow is allocated */ fs->generation = generation; } else { /* Reprogram the QP flow with new generation */ rcd->flows[fs->index].generation = generation; fs->generation = kern_setup_hw_flow(rcd, fs->index); } fs->psn = 0; /* * Disable SW PSN checking since a RESYNC is equivalent to a * sync point and the flow has/will be reprogrammed */ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN; trace_hfi1_tid_write_rsp_rcv_resync(qp); /* * Reset all TID flow information with the new generation. 
* This is done for all requests and segments after the * last received segment */ for (idx = qpriv->r_tid_tail; ; idx++) { u16 flow_idx; if (idx > rvt_size_atomic(&dev->rdi)) idx = 0; e = &qp->s_ack_queue[idx]; if (e->opcode == TID_OP(WRITE_REQ)) { req = ack_to_tid_req(e); trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn, e->lpsn, req); /* start from last unacked segment */ for (flow_idx = req->clear_tail; CIRC_CNT(req->setup_head, flow_idx, MAX_FLOWS); flow_idx = CIRC_NEXT(flow_idx, MAX_FLOWS)) { u32 lpsn; u32 next; flow = &req->flows[flow_idx]; lpsn = full_flow_psn(flow, flow->flow_state.lpsn); next = flow->flow_state.r_next_psn; flow->npkts = delta_psn(lpsn, next - 1); flow->flow_state.generation = fs->generation; flow->flow_state.spsn = fs->psn; flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; flow->flow_state.r_next_psn = full_flow_psn(flow, flow->flow_state.spsn); fs->psn += flow->npkts; trace_hfi1_tid_flow_rcv_resync(qp, flow_idx, flow); } } if (idx == qp->s_tail_ack_queue) break; } spin_unlock(&rcd->exp_lock); qpriv->resync = true; /* RESYNC request always gets a TID RDMA ACK. */ qpriv->s_nak_state = 0; tid_rdma_trigger_ack(qp); bail: if (fecn) qp->s_flags |= RVT_S_ECN; spin_unlock_irqrestore(&qp->s_lock, flags); } /* * Call this function when the last TID RDMA WRITE DATA packet for a request * is built. */ static void update_tid_tail(struct rvt_qp *qp) __must_hold(&qp->s_lock) { struct hfi1_qp_priv *priv = qp->priv; u32 i; struct rvt_swqe *wqe; lockdep_assert_held(&qp->s_lock); /* Can't move beyond s_tid_cur */ if (priv->s_tid_tail == priv->s_tid_cur) return; for (i = priv->s_tid_tail + 1; ; i++) { if (i == qp->s_size) i = 0; if (i == priv->s_tid_cur) break; wqe = rvt_get_swqe_ptr(qp, i); if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) break; } priv->s_tid_tail = i; priv->s_state = TID_OP(WRITE_RESP); } int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps) __must_hold(&qp->s_lock) { struct hfi1_qp_priv *priv = qp->priv; struct rvt_swqe *wqe; u32 bth1 = 0, bth2 = 0, hwords = 5, len, middle = 0; struct ib_other_headers *ohdr; struct rvt_sge_state *ss = &qp->s_sge; struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue]; struct tid_rdma_request *req = ack_to_tid_req(e); bool last = false; u8 opcode = TID_OP(WRITE_DATA); lockdep_assert_held(&qp->s_lock); trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0); /* * Prioritize the sending of the requests and responses over the * sending of the TID RDMA data packets. */ if (((atomic_read(&priv->n_tid_requests) < HFI1_TID_RDMA_WRITE_CNT) && atomic_read(&priv->n_requests) && !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK | HFI1_S_ANY_WAIT_IO))) || (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg && !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) { struct iowait_work *iowork; iowork = iowait_get_ib_work(&priv->s_iowait); ps->s_txreq = get_waiting_verbs_txreq(iowork); if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) { priv->s_flags |= HFI1_S_TID_BUSY_SET; return 1; } } ps->s_txreq = get_txreq(ps->dev, qp); if (!ps->s_txreq) goto bail_no_tx; ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth; if ((priv->s_flags & RVT_S_ACK_PENDING) && make_tid_rdma_ack(qp, ohdr, ps)) return 1; /* * Bail out if we can't send data. * Be reminded that this check must been done after the call to * make_tid_rdma_ack() because the responding QP could be in * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA. 
*/ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) goto bail; if (priv->s_flags & RVT_S_WAIT_ACK) goto bail; /* Check whether there is anything to do. */ if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) goto bail; wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail); req = wqe_to_tid_req(wqe); trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); switch (priv->s_state) { case TID_OP(WRITE_REQ): case TID_OP(WRITE_RESP): priv->tid_ss.sge = wqe->sg_list[0]; priv->tid_ss.sg_list = wqe->sg_list + 1; priv->tid_ss.num_sge = wqe->wr.num_sge; priv->tid_ss.total_len = wqe->length; if (priv->s_state == TID_OP(WRITE_REQ)) hfi1_tid_rdma_restart_req(qp, wqe, &bth2); priv->s_state = TID_OP(WRITE_DATA); fallthrough; case TID_OP(WRITE_DATA): /* * 1. Check whether TID RDMA WRITE RESP available. * 2. If no: * 2.1 If have more segments and no TID RDMA WRITE RESP, * set HFI1_S_WAIT_TID_RESP * 2.2 Return indicating no progress made. * 3. If yes: * 3.1 Build TID RDMA WRITE DATA packet. * 3.2 If last packet in segment: * 3.2.1 Change KDETH header bits * 3.2.2 Advance RESP pointers. * 3.3 Return indicating progress made. */ trace_hfi1_sender_make_tid_pkt(qp); trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0); wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail); req = wqe_to_tid_req(wqe); len = wqe->length; if (!req->comp_seg || req->cur_seg == req->comp_seg) goto bail; trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); last = hfi1_build_tid_rdma_packet(wqe, ohdr, &bth1, &bth2, &len); if (last) { /* move pointer to next flow */ req->clear_tail = CIRC_NEXT(req->clear_tail, MAX_FLOWS); if (++req->cur_seg < req->total_segs) { if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) qp->s_flags |= HFI1_S_WAIT_TID_RESP; } else { priv->s_state = TID_OP(WRITE_DATA_LAST); opcode = TID_OP(WRITE_DATA_LAST); /* Advance the s_tid_tail now */ update_tid_tail(qp); } } hwords += sizeof(ohdr->u.tid_rdma.w_data) / sizeof(u32); ss = &priv->tid_ss; break; case TID_OP(RESYNC): trace_hfi1_sender_make_tid_pkt(qp); /* Use generation from the most recently received response */ wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur); req = wqe_to_tid_req(wqe); /* If no responses for this WQE look at the previous one */ if (!req->comp_seg) { wqe = rvt_get_swqe_ptr(qp, (!priv->s_tid_cur ? qp->s_size : priv->s_tid_cur) - 1); req = wqe_to_tid_req(wqe); } hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1, &bth2, CIRC_PREV(req->setup_head, MAX_FLOWS)); ss = NULL; len = 0; opcode = TID_OP(RESYNC); break; default: goto bail; } if (priv->s_flags & RVT_S_SEND_ONE) { priv->s_flags &= ~RVT_S_SEND_ONE; priv->s_flags |= RVT_S_WAIT_ACK; bth2 |= IB_BTH_REQ_ACK; } qp->s_len -= len; ps->s_txreq->hdr_dwords = hwords; ps->s_txreq->sde = priv->s_sde; ps->s_txreq->ss = ss; ps->s_txreq->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2, middle, ps); return 1; bail: hfi1_put_txreq(ps->s_txreq); bail_no_tx: ps->s_txreq = NULL; priv->s_flags &= ~RVT_S_BUSY; /* * If we didn't get a txreq, the QP will be woken up later to try * again, set the flags to the wake up which work item to wake * up. * (A better algorithm should be found to do this and generalize the * sleep/wakeup flags.) 
*/ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID); return 0; } static int make_tid_rdma_ack(struct rvt_qp *qp, struct ib_other_headers *ohdr, struct hfi1_pkt_state *ps) { struct rvt_ack_entry *e; struct hfi1_qp_priv *qpriv = qp->priv; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); u32 hwords, next; u32 len = 0; u32 bth1 = 0, bth2 = 0; int middle = 0; u16 flow; struct tid_rdma_request *req, *nreq; trace_hfi1_tid_write_rsp_make_tid_ack(qp); /* Don't send an ACK if we aren't supposed to. */ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto bail; /* header size in 32-bit words LRH+BTH = (8+12)/4. */ hwords = 5; e = &qp->s_ack_queue[qpriv->r_tid_ack]; req = ack_to_tid_req(e); /* * In the RESYNC case, we are exactly one segment past the * previously sent ack or at the previously sent NAK. So to send * the resync ack, we go back one segment (which might be part of * the previous request) and let the do-while loop execute again. * The advantage of executing the do-while loop is that any data * received after the previous ack is automatically acked in the * RESYNC ack. It turns out that for the do-while loop we only need * to pull back qpriv->r_tid_ack, not the segment * indices/counters. The scheme works even if the previous request * was not a TID WRITE request. */ if (qpriv->resync) { if (!req->ack_seg || req->ack_seg == req->total_segs) qpriv->r_tid_ack = !qpriv->r_tid_ack ? rvt_size_atomic(&dev->rdi) : qpriv->r_tid_ack - 1; e = &qp->s_ack_queue[qpriv->r_tid_ack]; req = ack_to_tid_req(e); } trace_hfi1_rsp_make_tid_ack(qp, e->psn); trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn, req); /* * If we've sent all the ACKs that we can, we are done * until we get more segments... */ if (!qpriv->s_nak_state && !qpriv->resync && req->ack_seg == req->comp_seg) goto bail; do { /* * To deal with coalesced ACKs, the acked_tail pointer * into the flow array is used. The distance between it * and the clear_tail is the number of flows that are * being ACK'ed. */ req->ack_seg += /* Get up-to-date value */ CIRC_CNT(req->clear_tail, req->acked_tail, MAX_FLOWS); /* Advance acked index */ req->acked_tail = req->clear_tail; /* * req->clear_tail points to the segment currently being * received. So, when sending an ACK, the previous * segment is being ACK'ed. */ flow = CIRC_PREV(req->acked_tail, MAX_FLOWS); if (req->ack_seg != req->total_segs) break; req->state = TID_REQUEST_COMPLETE; next = qpriv->r_tid_ack + 1; if (next > rvt_size_atomic(&dev->rdi)) next = 0; qpriv->r_tid_ack = next; if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ)) break; nreq = ack_to_tid_req(&qp->s_ack_queue[next]); if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg) break; /* Move to the next ack entry now */ e = &qp->s_ack_queue[qpriv->r_tid_ack]; req = ack_to_tid_req(e); } while (1); /* * At this point qpriv->r_tid_ack == qpriv->r_tid_tail but e and * req could be pointing at the previous ack queue entry */ if (qpriv->s_nak_state || (qpriv->resync && !hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1) && (cmp_psn(qpriv->r_next_psn_kdeth - 1, full_flow_psn(&req->flows[flow], req->flows[flow].flow_state.lpsn)) > 0))) { /* * A NAK will implicitly acknowledge all previous TID RDMA * requests. 
Therefore, we NAK with the req->acked_tail * segment for the request at qpriv->r_tid_ack (same at * this point as the req->clear_tail segment for the * qpriv->r_tid_tail request) */ e = &qp->s_ack_queue[qpriv->r_tid_ack]; req = ack_to_tid_req(e); flow = req->acked_tail; } else if (req->ack_seg == req->total_segs && qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK; trace_hfi1_tid_write_rsp_make_tid_ack(qp); trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn, req); hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1, &bth2); len = 0; qpriv->s_flags &= ~RVT_S_ACK_PENDING; ps->s_txreq->hdr_dwords = hwords; ps->s_txreq->sde = qpriv->s_sde; ps->s_txreq->s_cur_size = len; ps->s_txreq->ss = NULL; hfi1_make_ruc_header(qp, ohdr, (TID_OP(ACK) << 24), bth1, bth2, middle, ps); ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP; return 1; bail: /* * Ensure s_rdma_ack_cnt changes are committed prior to resetting * RVT_S_RESP_PENDING */ smp_wmb(); qpriv->s_flags &= ~RVT_S_ACK_PENDING; return 0; } static int hfi1_send_tid_ok(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; return !(priv->s_flags & RVT_S_BUSY || qp->s_flags & HFI1_S_ANY_WAIT_IO) && (verbs_txreq_queued(iowait_get_tid_work(&priv->s_iowait)) || (priv->s_flags & RVT_S_RESP_PENDING) || !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND)); } void _hfi1_do_tid_send(struct work_struct *work) { struct iowait_work *w = container_of(work, struct iowait_work, iowork); struct rvt_qp *qp = iowait_to_qp(w->iow); hfi1_do_tid_send(qp); } static void hfi1_do_tid_send(struct rvt_qp *qp) { struct hfi1_pkt_state ps; struct hfi1_qp_priv *priv = qp->priv; ps.dev = to_idev(qp->ibqp.device); ps.ibp = to_iport(qp->ibqp.device, qp->port_num); ps.ppd = ppd_from_ibp(ps.ibp); ps.wait = iowait_get_tid_work(&priv->s_iowait); ps.in_thread = false; ps.timeout_int = qp->timeout_jiffies / 8; trace_hfi1_rc_do_tid_send(qp, false); spin_lock_irqsave(&qp->s_lock, ps.flags); /* Return if we are already busy processing a work request. */ if (!hfi1_send_tid_ok(qp)) { if (qp->s_flags & HFI1_S_ANY_WAIT_IO) iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID); spin_unlock_irqrestore(&qp->s_lock, ps.flags); return; } priv->s_flags |= RVT_S_BUSY; ps.timeout = jiffies + ps.timeout_int; ps.cpu = priv->s_sde ? priv->s_sde->cpu : cpumask_first(cpumask_of_node(ps.ppd->dd->node)); ps.pkts_sent = false; /* insure a pre-built packet is handled */ ps.s_txreq = get_waiting_verbs_txreq(ps.wait); do { /* Check for a constructed packet to be sent. */ if (ps.s_txreq) { if (priv->s_flags & HFI1_S_TID_BUSY_SET) { qp->s_flags |= RVT_S_BUSY; ps.wait = iowait_get_ib_work(&priv->s_iowait); } spin_unlock_irqrestore(&qp->s_lock, ps.flags); /* * If the packet cannot be sent now, return and * the send tasklet will be woken up later. 
*/ if (hfi1_verbs_send(qp, &ps)) return; /* allow other tasks to run */ if (hfi1_schedule_send_yield(qp, &ps, true)) return; spin_lock_irqsave(&qp->s_lock, ps.flags); if (priv->s_flags & HFI1_S_TID_BUSY_SET) { qp->s_flags &= ~RVT_S_BUSY; priv->s_flags &= ~HFI1_S_TID_BUSY_SET; ps.wait = iowait_get_tid_work(&priv->s_iowait); if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) hfi1_schedule_send(qp); } } } while (hfi1_make_tid_rdma_pkt(qp, &ps)); iowait_starve_clear(ps.pkts_sent, &priv->s_iowait); spin_unlock_irqrestore(&qp->s_lock, ps.flags); } static bool _hfi1_schedule_tid_send(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_devdata *dd = ppd->dd; if ((dd->flags & HFI1_SHUTDOWN)) return true; return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? priv->s_sde->cpu : cpumask_first(cpumask_of_node(dd->node))); } /** * hfi1_schedule_tid_send - schedule progress on TID RDMA state machine * @qp: the QP * * This schedules qp progress on the TID RDMA state machine. Caller * should hold the s_lock. * Unlike hfi1_schedule_send(), this cannot use hfi1_send_ok() because * the two state machines can step on each other with respect to the * RVT_S_BUSY flag. * Therefore, a modified test is used. * @return true if the second leg is scheduled; * false if the second leg is not scheduled. */ bool hfi1_schedule_tid_send(struct rvt_qp *qp) { lockdep_assert_held(&qp->s_lock); if (hfi1_send_tid_ok(qp)) { /* * The following call returns true if the qp is not on the * queue and false if the qp is already on the queue before * this call. Either way, the qp will be on the queue when the * call returns. */ _hfi1_schedule_tid_send(qp); return true; } if (qp->s_flags & HFI1_S_ANY_WAIT_IO) iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait, IOWAIT_PENDING_TID); return false; } bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e) { struct rvt_ack_entry *prev; struct tid_rdma_request *req; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); struct hfi1_qp_priv *priv = qp->priv; u32 s_prev; s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) : (qp->s_tail_ack_queue - 1); prev = &qp->s_ack_queue[s_prev]; if ((e->opcode == TID_OP(READ_REQ) || e->opcode == OP(RDMA_READ_REQUEST)) && prev->opcode == TID_OP(WRITE_REQ)) { req = ack_to_tid_req(prev); if (req->ack_seg != req->total_segs) { priv->s_flags |= HFI1_R_TID_WAIT_INTERLCK; return true; } } return false; } static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx) { u64 reg; /* * The only sane way to get the amount of * progress is to read the HW flow state. */ reg = read_uctxt_csr(dd, ctxt, RCV_TID_FLOW_TABLE + (8 * fidx)); return mask_psn(reg); } static void tid_rdma_rcv_err(struct hfi1_packet *packet, struct ib_other_headers *ohdr, struct rvt_qp *qp, u32 psn, int diff, bool fecn) { unsigned long flags; tid_rdma_rcv_error(packet, ohdr, qp, psn, diff); if (fecn) { spin_lock_irqsave(&qp->s_lock, flags); qp->s_flags |= RVT_S_ECN; spin_unlock_irqrestore(&qp->s_lock, flags); } } static void update_r_next_psn_fecn(struct hfi1_packet *packet, struct hfi1_qp_priv *priv, struct hfi1_ctxtdata *rcd, struct tid_rdma_flow *flow, bool fecn) { /* * If a start/middle packet is delivered here due to * RSM rule and FECN, we need to update the r_next_psn. 
	 */
	if (fecn && packet->etype == RHF_RCV_TYPE_EAGER &&
	    !(priv->s_flags & HFI1_R_TID_SW_PSN)) {
		struct hfi1_devdata *dd = rcd->dd;

		flow->flow_state.r_next_psn =
			read_r_next_psn(dd, rcd->ctxt, flow->idx);
	}
}
linux-master
drivers/infiniband/hw/hfi1/tid_rdma.c
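The coalesced-ACK bookkeeping in make_tid_rdma_ack() above relies on power-of-two circular-index arithmetic over the per-request flow ring. The standalone sketch below is not driver code: CIRC_CNT()/CIRC_PREV() are local stand-ins modelled on the <linux/circ_buf.h>-style helpers, and MAX_FLOWS, the struct, and its field names are illustrative only.

/* Compile with: cc -o tid_ack_demo tid_ack_demo.c */
#include <stdio.h>

#define MAX_FLOWS 16	/* stand-in; must be a power of two for the masking below */

/* Modelled after the circular-buffer helpers the driver uses. */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))
#define CIRC_PREV(val, size)		(((val) - 1) & ((size) - 1))

struct demo_tid_req {
	unsigned int clear_tail;	/* flow currently being received */
	unsigned int acked_tail;	/* last flow already acknowledged */
	unsigned int ack_seg;		/* segments acknowledged so far */
};

int main(void)
{
	struct demo_tid_req req = {
		.clear_tail = 4, .acked_tail = 1, .ack_seg = 1,
	};

	/*
	 * Segments completed since the last ACK sit between acked_tail and
	 * clear_tail; all of them are covered by one coalesced TID ACK.
	 */
	unsigned int coalesced = CIRC_CNT(req.clear_tail, req.acked_tail,
					  MAX_FLOWS);

	req.ack_seg += coalesced;
	req.acked_tail = req.clear_tail;

	/*
	 * The ACK names the flow just before clear_tail, i.e. the most
	 * recently completed segment, as in the driver code above.
	 */
	unsigned int flow = CIRC_PREV(req.acked_tail, MAX_FLOWS);

	printf("coalesced %u segment(s), ack_seg now %u, acking flow %u\n",
	       coalesced, req.ack_seg, flow);
	return 0;
}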
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 */

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>

#include "hfi.h"
#include "device.h"

static char *hfi1_devnode(const struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0600;
	return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
}

static const struct class class = {
	.name = "hfi1",
	.devnode = hfi1_devnode,
};

static char *hfi1_user_devnode(const struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
}

static const struct class user_class = {
	.name = "hfi1_user",
	.devnode = hfi1_user_devnode,
};

static dev_t hfi1_dev;

int hfi1_cdev_init(int minor, const char *name,
		   const struct file_operations *fops,
		   struct cdev *cdev, struct device **devp,
		   bool user_accessible,
		   struct kobject *parent)
{
	const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
	struct device *device = NULL;
	int ret;

	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;
	cdev_set_parent(cdev, parent);
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		pr_err("Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto done;
	}

	if (user_accessible)
		device = device_create(&user_class, NULL, dev, NULL, "%s", name);
	else
		device = device_create(&class, NULL, dev, NULL, "%s", name);

	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		device = NULL;
		pr_err("Could not create device for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		cdev_del(cdev);
	}
done:
	*devp = device;
	return ret;
}

void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp)
{
	struct device *device = *devp;

	if (device) {
		device_unregister(device);
		*devp = NULL;

		cdev_del(cdev);
	}
}

static const char *hfi1_class_name = "hfi1";

const char *class_name(void)
{
	return hfi1_class_name;
}

int __init dev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hfi1_dev, 0, HFI1_NMINORS, DRIVER_NAME);
	if (ret < 0) {
		pr_err("Could not allocate chrdev region (err %d)\n", -ret);
		goto done;
	}

	ret = class_register(&class);
	if (ret) {
		pr_err("Could not create device class (err %d)\n", -ret);
		unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
		goto done;
	}

	ret = class_register(&user_class);
	if (ret) {
		pr_err("Could not create device class for user accessible files (err %d)\n",
		       -ret);
		class_unregister(&class);
		unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
		goto done;
	}

done:
	return ret;
}

void dev_cleanup(void)
{
	class_unregister(&class);
	class_unregister(&user_class);
	unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
}
linux-master
drivers/infiniband/hw/hfi1/device.c
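hfi1_cdev_init() above builds every node's dev_t from the single major number returned by alloc_chrdev_region(), varying only the minor. The userspace sketch below is an illustration, not part of the driver: it shows the equivalent major/minor arithmetic with glibc's makedev()/major()/minor(), and the major value 243 is a placeholder.

#include <stdio.h>
#include <sys/sysmacros.h>	/* makedev(), major(), minor() */
#include <sys/types.h>

int main(void)
{
	/* Pretend this is the base returned by alloc_chrdev_region(). */
	dev_t base = makedev(243, 0);

	/* Each character device shares the major and picks its own minor. */
	for (unsigned int m = 0; m < 3; m++) {
		dev_t node = makedev(major(base), m);

		printf("minor %u -> dev_t %#lx (major %u, minor %u)\n",
		       m, (unsigned long)node, major(node), minor(node));
	}
	return 0;
}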
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

#include "netdev.h"
#include "ipoib.h"

#define HFI1_IPOIB_SKB_PAD ((NET_SKB_PAD) + (NET_IP_ALIGN))

static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
{
	skb_checksum_none_assert(skb);
	skb->protocol = *((__be16 *)data);

	skb_put_data(skb, data, size);
	skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
	skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
}

static struct sk_buff *prepare_frag_skb(struct napi_struct *napi, int size)
{
	struct sk_buff *skb;
	int skb_size = SKB_DATA_ALIGN(size + HFI1_IPOIB_SKB_PAD);
	void *frag;

	skb_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	skb_size = SKB_DATA_ALIGN(skb_size);
	frag = napi_alloc_frag(skb_size);

	if (unlikely(!frag))
		return napi_alloc_skb(napi, size);

	skb = build_skb(frag, skb_size);

	if (unlikely(!skb)) {
		skb_free_frag(frag);
		return NULL;
	}

	skb_reserve(skb, HFI1_IPOIB_SKB_PAD);
	return skb;
}

struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
				       int size, void *data)
{
	struct napi_struct *napi = &rxq->napi;
	int skb_size = size + HFI1_IPOIB_ENCAP_LEN;
	struct sk_buff *skb;

	/*
	 * For smaller allocations (up to one page minus skb overhead), use
	 * the napi skb cache; otherwise, fall back to the napi frag cache.
	 */
	if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE))
		skb = napi_alloc_skb(napi, skb_size);
	else
		skb = prepare_frag_skb(napi, skb_size);

	if (unlikely(!skb))
		return NULL;

	copy_ipoib_buf(skb, data, size);

	return skb;
}

int hfi1_ipoib_rxq_init(struct net_device *netdev)
{
	struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
	struct hfi1_devdata *dd = ipoib_priv->dd;
	int ret;

	ret = hfi1_netdev_rx_init(dd);
	if (ret)
		return ret;

	hfi1_init_aip_rsm(dd);

	return ret;
}

void hfi1_ipoib_rxq_deinit(struct net_device *netdev)
{
	struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
	struct hfi1_devdata *dd = ipoib_priv->dd;

	hfi1_deinit_aip_rsm(dd);
	hfi1_netdev_rx_destroy(dd);
}
linux-master
drivers/infiniband/hw/hfi1/ipoib_rx.c
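hfi1_ipoib_prepare_skb() above chooses between the napi skb cache and a page-fragment allocation based on the receive size, and prepare_frag_skb() rounds the fragment up for padding and the shared-info block. The standalone sketch below models that arithmetic only; ALIGN_DEMO, SHINFO_DEMO and PAD_DEMO are illustrative stand-ins for SKB_DATA_ALIGN(), sizeof(struct skb_shared_info) and HFI1_IPOIB_SKB_PAD, not the real kernel values.

#include <stdio.h>

#define CACHE_LINE	64
#define ALIGN_DEMO(x)	(((x) + (CACHE_LINE - 1)) & ~(CACHE_LINE - 1))
#define PAGE_SIZE_DEMO	4096
#define SHINFO_DEMO	320		/* placeholder for sizeof(struct skb_shared_info) */
#define PAD_DEMO	(32 + 2)	/* placeholder for NET_SKB_PAD + NET_IP_ALIGN */

int main(void)
{
	unsigned int sizes[] = { 256, 2048, 3500, 8192 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int size = sizes[i];
		/* Driver cutoff: fits in a page once skb overhead is deducted? */
		int napi_cache = size <= PAGE_SIZE_DEMO - ALIGN_DEMO(SHINFO_DEMO);
		/* Otherwise a fragment of this rounded-up size backs the skb. */
		unsigned int frag = ALIGN_DEMO(ALIGN_DEMO(size + PAD_DEMO) +
					       ALIGN_DEMO(SHINFO_DEMO));

		printf("rx size %5u: %s (frag would be %u bytes)\n", size,
		       napi_cache ? "napi skb cache" : "napi frag cache", frag);
	}
	return 0;
}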
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2019 Intel Corporation. */ #include <linux/pci.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/vmalloc.h> #include <linux/module.h> #include "hfi.h" #include "chip_registers.h" #include "aspm.h" /* * This file contains PCIe utility routines. */ /* * Do all the common PCIe setup and initialization. */ int hfi1_pcie_init(struct hfi1_devdata *dd) { int ret; struct pci_dev *pdev = dd->pcidev; ret = pci_enable_device(pdev); if (ret) { /* * This can happen (in theory) iff: * We did a chip reset, and then failed to reprogram the * BAR, or the chip reset due to an internal error. We then * unloaded the driver and reloaded it. * * Both reset cases set the BAR back to initial state. For * the latter case, the AER sticky error bit at offset 0x718 * should be set, but the Linux kernel doesn't yet know * about that, it appears. If the original BAR was retained * in the kernel data structures, this may be OK. */ dd_dev_err(dd, "pci enable failed: error %d\n", -ret); return ret; } ret = pci_request_regions(pdev, DRIVER_NAME); if (ret) { dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret); goto bail; } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (ret) { /* * If the 64 bit setup fails, try 32 bit. Some systems * do not setup 64 bit maps on systems with 2GB or less * memory installed. */ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret); goto bail; } } pci_set_master(pdev); return 0; bail: hfi1_pcie_cleanup(pdev); return ret; } /* * Clean what was done in hfi1_pcie_init() */ void hfi1_pcie_cleanup(struct pci_dev *pdev) { pci_disable_device(pdev); /* * Release regions should be called after the disable. OK to * call if request regions has not been called or failed. */ pci_release_regions(pdev); } /* * Do remaining PCIe setup, once dd is allocated, and save away * fields required to re-initialize after a chip reset, or for * various other purposes */ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) { unsigned long len; resource_size_t addr; int ret = 0; u32 rcv_array_count; addr = pci_resource_start(pdev, 0); len = pci_resource_len(pdev, 0); /* * The TXE PIO buffers are at the tail end of the chip space. * Cut them off and map them separately. 
*/ /* sanity check vs expectations */ if (len != TXE_PIO_SEND + TXE_PIO_SIZE) { dd_dev_err(dd, "chip PIO range does not match\n"); return -EINVAL; } dd->kregbase1 = ioremap(addr, RCV_ARRAY); if (!dd->kregbase1) { dd_dev_err(dd, "UC mapping of kregbase1 failed\n"); return -ENOMEM; } dd_dev_info(dd, "UC base1: %p for %x\n", dd->kregbase1, RCV_ARRAY); /* verify that reads actually work, save revision for reset check */ dd->revision = readq(dd->kregbase1 + CCE_REVISION); if (dd->revision == ~(u64)0) { dd_dev_err(dd, "Cannot read chip CSRs\n"); goto nomem; } rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT); dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count); dd->base2_start = RCV_ARRAY + rcv_array_count * 8; dd->kregbase2 = ioremap( addr + dd->base2_start, TXE_PIO_SEND - dd->base2_start); if (!dd->kregbase2) { dd_dev_err(dd, "UC mapping of kregbase2 failed\n"); goto nomem; } dd_dev_info(dd, "UC base2: %p for %x\n", dd->kregbase2, TXE_PIO_SEND - dd->base2_start); dd->piobase = ioremap_wc(addr + TXE_PIO_SEND, TXE_PIO_SIZE); if (!dd->piobase) { dd_dev_err(dd, "WC mapping of send buffers failed\n"); goto nomem; } dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE); dd->physaddr = addr; /* used for io_remap, etc. */ /* * Map the chip's RcvArray as write-combining to allow us * to write an entire cacheline worth of entries in one shot. */ dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY, rcv_array_count * 8); if (!dd->rcvarray_wc) { dd_dev_err(dd, "WC mapping of receive array failed\n"); goto nomem; } dd_dev_info(dd, "WC RcvArray: %p for %x\n", dd->rcvarray_wc, rcv_array_count * 8); dd->flags |= HFI1_PRESENT; /* chip.c CSR routines now work */ return 0; nomem: ret = -ENOMEM; hfi1_pcie_ddcleanup(dd); return ret; } /* * Do PCIe cleanup related to dd, after chip-specific cleanup, etc. Just prior * to releasing the dd memory. * Void because all of the core pcie cleanup functions are void. */ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd) { dd->flags &= ~HFI1_PRESENT; if (dd->kregbase1) iounmap(dd->kregbase1); dd->kregbase1 = NULL; if (dd->kregbase2) iounmap(dd->kregbase2); dd->kregbase2 = NULL; if (dd->rcvarray_wc) iounmap(dd->rcvarray_wc); dd->rcvarray_wc = NULL; if (dd->piobase) iounmap(dd->piobase); dd->piobase = NULL; } /* return the PCIe link speed from the given link status */ static u32 extract_speed(u16 linkstat) { u32 speed; switch (linkstat & PCI_EXP_LNKSTA_CLS) { default: /* not defined, assume Gen1 */ case PCI_EXP_LNKSTA_CLS_2_5GB: speed = 2500; /* Gen 1, 2.5GHz */ break; case PCI_EXP_LNKSTA_CLS_5_0GB: speed = 5000; /* Gen 2, 5GHz */ break; case PCI_EXP_LNKSTA_CLS_8_0GB: speed = 8000; /* Gen 3, 8GHz */ break; } return speed; } /* return the PCIe link speed from the given link status */ static u32 extract_width(u16 linkstat) { return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT; } /* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */ static void update_lbus_info(struct hfi1_devdata *dd) { u16 linkstat; int ret; ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat); if (ret) { dd_dev_err(dd, "Unable to read from PCI config\n"); return; } dd->lbus_width = extract_width(linkstat); dd->lbus_speed = extract_speed(linkstat); snprintf(dd->lbus_info, sizeof(dd->lbus_info), "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width); } /* * Read in the current PCIe link width and speed. Find if the link is * Gen3 capable. 
*/ int pcie_speeds(struct hfi1_devdata *dd) { u32 linkcap; struct pci_dev *parent = dd->pcidev->bus->self; int ret; if (!pci_is_pcie(dd->pcidev)) { dd_dev_err(dd, "Can't find PCI Express capability!\n"); return -EINVAL; } /* find if our max speed is Gen3 and parent supports Gen3 speeds */ dd->link_gen3_capable = 1; ret = pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap); if (ret) { dd_dev_err(dd, "Unable to read from PCI config\n"); return pcibios_err_to_errno(ret); } if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) { dd_dev_info(dd, "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n", linkcap & PCI_EXP_LNKCAP_SLS); dd->link_gen3_capable = 0; } /* * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed */ if (parent && (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT || dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) { dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n"); dd->link_gen3_capable = 0; } /* obtain the link width and current speed */ update_lbus_info(dd); dd_dev_info(dd, "%s\n", dd->lbus_info); return 0; } /* * Restore command and BARs after a reset has wiped them out * * Returns 0 on success, otherwise a negative error value */ int restore_pci_variables(struct hfi1_devdata *dd) { int ret; ret = pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command); if (ret) goto error; ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0); if (ret) goto error; ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, dd->pcibar1); if (ret) goto error; ret = pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom); if (ret) goto error; ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, dd->pcie_devctl); if (ret) goto error; ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, dd->pcie_lnkctl); if (ret) goto error; ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2, dd->pcie_devctl2); if (ret) goto error; ret = pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0); if (ret) goto error; if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2); if (ret) goto error; } return 0; error: dd_dev_err(dd, "Unable to write to PCI config\n"); return pcibios_err_to_errno(ret); } /* * Save BARs and command to rewrite after device reset * * Returns 0 on success, otherwise a negative error value */ int save_pci_variables(struct hfi1_devdata *dd) { int ret; ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0); if (ret) goto error; ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1); if (ret) goto error; ret = pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); if (ret) goto error; ret = pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); if (ret) goto error; ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl); if (ret) goto error; ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &dd->pcie_lnkctl); if (ret) goto error; ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2, &dd->pcie_devctl2); if (ret) goto error; ret = pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0); if (ret) goto error; if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2); if (ret) goto error; } return 0; error: dd_dev_err(dd, "Unable to read from PCI config\n"); return pcibios_err_to_errno(ret); } /* * BIOS may not set 
PCIe bus-utilization parameters for best performance. * Check and optionally adjust them to maximize our throughput. */ static int hfi1_pcie_caps; module_param_named(pcie_caps, hfi1_pcie_caps, int, 0444); MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)"); /** * tune_pcie_caps() - Code to adjust PCIe capabilities. * @dd: Valid device data structure * */ void tune_pcie_caps(struct hfi1_devdata *dd) { struct pci_dev *parent; u16 rc_mpss, rc_mps, ep_mpss, ep_mps; u16 rc_mrrs, ep_mrrs, max_mrrs, ectl; int ret; /* * Turn on extended tags in DevCtl in case the BIOS has turned it off * to improve WFR SDMA bandwidth */ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl); if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) { dd_dev_info(dd, "Enabling PCIe extended tags\n"); ectl |= PCI_EXP_DEVCTL_EXT_TAG; ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); if (ret) dd_dev_info(dd, "Unable to write to PCI config\n"); } /* Find out supported and configured values for parent (root) */ parent = dd->pcidev->bus->self; /* * The driver cannot perform the tuning if it does not have * access to the upstream component. */ if (!parent) { dd_dev_info(dd, "Parent not found\n"); return; } if (!pci_is_root_bus(parent->bus)) { dd_dev_info(dd, "Parent not root\n"); return; } if (!pci_is_pcie(parent)) { dd_dev_info(dd, "Parent is not PCI Express capable\n"); return; } if (!pci_is_pcie(dd->pcidev)) { dd_dev_info(dd, "PCI device is not PCI Express capable\n"); return; } rc_mpss = parent->pcie_mpss; rc_mps = ffs(pcie_get_mps(parent)) - 8; /* Find out supported and configured values for endpoint (us) */ ep_mpss = dd->pcidev->pcie_mpss; ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8; /* Find max payload supported by root, endpoint */ if (rc_mpss > ep_mpss) rc_mpss = ep_mpss; /* If Supported greater than limit in module param, limit it */ if (rc_mpss > (hfi1_pcie_caps & 7)) rc_mpss = hfi1_pcie_caps & 7; /* If less than (allowed, supported), bump root payload */ if (rc_mpss > rc_mps) { rc_mps = rc_mpss; pcie_set_mps(parent, 128 << rc_mps); } /* If less than (allowed, supported), bump endpoint payload */ if (rc_mpss > ep_mps) { ep_mps = rc_mpss; pcie_set_mps(dd->pcidev, 128 << ep_mps); } /* * Now the Read Request size. * No field for max supported, but PCIe spec limits it to 4096, * which is code '5' (log2(4096) - 7) */ max_mrrs = 5; if (max_mrrs > ((hfi1_pcie_caps >> 4) & 7)) max_mrrs = (hfi1_pcie_caps >> 4) & 7; max_mrrs = 128 << max_mrrs; rc_mrrs = pcie_get_readrq(parent); ep_mrrs = pcie_get_readrq(dd->pcidev); if (max_mrrs > rc_mrrs) { rc_mrrs = max_mrrs; pcie_set_readrq(parent, rc_mrrs); } if (max_mrrs > ep_mrrs) { ep_mrrs = max_mrrs; pcie_set_readrq(dd->pcidev, ep_mrrs); } } /* End of PCIe capability tuning */ /* * From here through hfi1_pci_err_handler definition is invoked via * PCI error infrastructure, registered via pci */ static pci_ers_result_t pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct hfi1_devdata *dd = pci_get_drvdata(pdev); pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED; switch (state) { case pci_channel_io_normal: dd_dev_info(dd, "State Normal, ignoring\n"); break; case pci_channel_io_frozen: dd_dev_info(dd, "State Frozen, requesting reset\n"); pci_disable_device(pdev); ret = PCI_ERS_RESULT_NEED_RESET; break; case pci_channel_io_perm_failure: if (dd) { dd_dev_info(dd, "State Permanent Failure, disabling\n"); /* no more register accesses! 
*/ dd->flags &= ~HFI1_PRESENT; hfi1_disable_after_error(dd); } /* else early, or other problem */ ret = PCI_ERS_RESULT_DISCONNECT; break; default: /* shouldn't happen */ dd_dev_info(dd, "HFI1 PCI errors detected (state %d)\n", state); break; } return ret; } static pci_ers_result_t pci_mmio_enabled(struct pci_dev *pdev) { u64 words = 0U; struct hfi1_devdata *dd = pci_get_drvdata(pdev); pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED; if (dd && dd->pport) { words = read_port_cntr(dd->pport, C_RX_WORDS, CNTR_INVALID_VL); if (words == ~0ULL) ret = PCI_ERS_RESULT_NEED_RESET; dd_dev_info(dd, "HFI1 mmio_enabled function called, read wordscntr %llx, returning %d\n", words, ret); } return ret; } static pci_ers_result_t pci_slot_reset(struct pci_dev *pdev) { struct hfi1_devdata *dd = pci_get_drvdata(pdev); dd_dev_info(dd, "HFI1 slot_reset function called, ignored\n"); return PCI_ERS_RESULT_CAN_RECOVER; } static void pci_resume(struct pci_dev *pdev) { struct hfi1_devdata *dd = pci_get_drvdata(pdev); dd_dev_info(dd, "HFI1 resume function called\n"); /* * Running jobs will fail, since it's asynchronous * unlike sysfs-requested reset. Better than * doing nothing. */ hfi1_init(dd, 1); /* same as re-init after reset */ } const struct pci_error_handlers hfi1_pci_err_handler = { .error_detected = pci_error_detected, .mmio_enabled = pci_mmio_enabled, .slot_reset = pci_slot_reset, .resume = pci_resume, }; /*============================================================================*/ /* PCIe Gen3 support */ /* * This code is separated out because it is expected to be removed in the * final shipping product. If not, then it will be revisited and items * will be moved to more standard locations. */ /* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_STS field values */ #define DL_STATUS_HFI0 0x1 /* hfi0 firmware download complete */ #define DL_STATUS_HFI1 0x2 /* hfi1 firmware download complete */ #define DL_STATUS_BOTH 0x3 /* hfi0 and hfi1 firmware download complete */ /* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_ERR field values */ #define DL_ERR_NONE 0x0 /* no error */ #define DL_ERR_SWAP_PARITY 0x1 /* parity error in SerDes interrupt */ /* or response data */ #define DL_ERR_DISABLED 0x2 /* hfi disabled */ #define DL_ERR_SECURITY 0x3 /* security check failed */ #define DL_ERR_SBUS 0x4 /* SBus status error */ #define DL_ERR_XFR_PARITY 0x5 /* parity error during ROM transfer*/ /* gasket block secondary bus reset delay */ #define SBR_DELAY_US 200000 /* 200ms */ static uint pcie_target = 3; module_param(pcie_target, uint, S_IRUGO); MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)"); static uint pcie_force; module_param(pcie_force, uint, S_IRUGO); MODULE_PARM_DESC(pcie_force, "Force driver to do a PCIe firmware download even if already at target speed"); static uint pcie_retry = 5; module_param(pcie_retry, uint, S_IRUGO); MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested speed"); #define UNSET_PSET 255 #define DEFAULT_DISCRETE_PSET 2 /* discrete HFI */ #define DEFAULT_MCP_PSET 6 /* MCP HFI */ static uint pcie_pset = UNSET_PSET; module_param(pcie_pset, uint, S_IRUGO); MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10"); static uint pcie_ctle = 3; /* discrete on, integrated on */ module_param(pcie_ctle, uint, S_IRUGO); MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off"); /* equalization columns */ #define PREC 0 #define ATTN 1 #define POST 2 /* discrete silicon preliminary equalization values */ static const u8 
discrete_preliminary_eq[11][3] = { /* prec attn post */ { 0x00, 0x00, 0x12 }, /* p0 */ { 0x00, 0x00, 0x0c }, /* p1 */ { 0x00, 0x00, 0x0f }, /* p2 */ { 0x00, 0x00, 0x09 }, /* p3 */ { 0x00, 0x00, 0x00 }, /* p4 */ { 0x06, 0x00, 0x00 }, /* p5 */ { 0x09, 0x00, 0x00 }, /* p6 */ { 0x06, 0x00, 0x0f }, /* p7 */ { 0x09, 0x00, 0x09 }, /* p8 */ { 0x0c, 0x00, 0x00 }, /* p9 */ { 0x00, 0x00, 0x18 }, /* p10 */ }; /* integrated silicon preliminary equalization values */ static const u8 integrated_preliminary_eq[11][3] = { /* prec attn post */ { 0x00, 0x1e, 0x07 }, /* p0 */ { 0x00, 0x1e, 0x05 }, /* p1 */ { 0x00, 0x1e, 0x06 }, /* p2 */ { 0x00, 0x1e, 0x04 }, /* p3 */ { 0x00, 0x1e, 0x00 }, /* p4 */ { 0x03, 0x1e, 0x00 }, /* p5 */ { 0x04, 0x1e, 0x00 }, /* p6 */ { 0x03, 0x1e, 0x06 }, /* p7 */ { 0x03, 0x1e, 0x04 }, /* p8 */ { 0x05, 0x1e, 0x00 }, /* p9 */ { 0x00, 0x1e, 0x0a }, /* p10 */ }; static const u8 discrete_ctle_tunings[11][4] = { /* DC LF HF BW */ { 0x48, 0x0b, 0x04, 0x04 }, /* p0 */ { 0x60, 0x05, 0x0f, 0x0a }, /* p1 */ { 0x50, 0x09, 0x06, 0x06 }, /* p2 */ { 0x68, 0x05, 0x0f, 0x0a }, /* p3 */ { 0x80, 0x05, 0x0f, 0x0a }, /* p4 */ { 0x70, 0x05, 0x0f, 0x0a }, /* p5 */ { 0x68, 0x05, 0x0f, 0x0a }, /* p6 */ { 0x38, 0x0f, 0x00, 0x00 }, /* p7 */ { 0x48, 0x09, 0x06, 0x06 }, /* p8 */ { 0x60, 0x05, 0x0f, 0x0a }, /* p9 */ { 0x38, 0x0f, 0x00, 0x00 }, /* p10 */ }; static const u8 integrated_ctle_tunings[11][4] = { /* DC LF HF BW */ { 0x38, 0x0f, 0x00, 0x00 }, /* p0 */ { 0x38, 0x0f, 0x00, 0x00 }, /* p1 */ { 0x38, 0x0f, 0x00, 0x00 }, /* p2 */ { 0x38, 0x0f, 0x00, 0x00 }, /* p3 */ { 0x58, 0x0a, 0x05, 0x05 }, /* p4 */ { 0x48, 0x0a, 0x05, 0x05 }, /* p5 */ { 0x40, 0x0a, 0x05, 0x05 }, /* p6 */ { 0x38, 0x0f, 0x00, 0x00 }, /* p7 */ { 0x38, 0x0f, 0x00, 0x00 }, /* p8 */ { 0x38, 0x09, 0x06, 0x06 }, /* p9 */ { 0x38, 0x0e, 0x01, 0x01 }, /* p10 */ }; /* helper to format the value to write to hardware */ #define eq_value(pre, curr, post) \ ((((u32)(pre)) << \ PCIE_CFG_REG_PL102_GEN3_EQ_PRE_CURSOR_PSET_SHIFT) \ | (((u32)(curr)) << PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT) \ | (((u32)(post)) << \ PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT)) /* * Load the given EQ preset table into the PCIe hardware. */ static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs, u8 div) { struct pci_dev *pdev = dd->pcidev; u32 hit_error = 0; u32 violation; u32 i; u8 c_minus1, c0, c_plus1; int ret; for (i = 0; i < 11; i++) { /* set index */ pci_write_config_dword(pdev, PCIE_CFG_REG_PL103, i); /* write the value */ c_minus1 = eq[i][PREC] / div; c0 = fs - (eq[i][PREC] / div) - (eq[i][POST] / div); c_plus1 = eq[i][POST] / div; pci_write_config_dword(pdev, PCIE_CFG_REG_PL102, eq_value(c_minus1, c0, c_plus1)); /* check if these coefficients violate EQ rules */ ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL105, &violation); if (ret) { dd_dev_err(dd, "Unable to read from PCI config\n"); hit_error = 1; break; } if (violation & PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK){ if (hit_error == 0) { dd_dev_err(dd, "Gen3 EQ Table Coefficient rule violations\n"); dd_dev_err(dd, " prec attn post\n"); } dd_dev_err(dd, " p%02d: %02x %02x %02x\n", i, (u32)eq[i][0], (u32)eq[i][1], (u32)eq[i][2]); dd_dev_err(dd, " %02x %02x %02x\n", (u32)c_minus1, (u32)c0, (u32)c_plus1); hit_error = 1; } } if (hit_error) return -EINVAL; return 0; } /* * Steps to be done after the PCIe firmware is downloaded and * before the SBR for the Pcie Gen3. * The SBus resource is already being held. 
*/ static void pcie_post_steps(struct hfi1_devdata *dd) { int i; set_sbus_fast_mode(dd); /* * Write to the PCIe PCSes to set the G3_LOCKED_NEXT bits to 1. * This avoids a spurious framing error that can otherwise be * generated by the MAC layer. * * Use individual addresses since no broadcast is set up. */ for (i = 0; i < NUM_PCIE_SERDES; i++) { sbus_request(dd, pcie_pcs_addrs[dd->hfi1_id][i], 0x03, WRITE_SBUS_RECEIVER, 0x00022132); } clear_sbus_fast_mode(dd); } /* * Trigger a secondary bus reset (SBR) on ourselves using our parent. * * Based on pci_parent_bus_reset() which is not exported by the * kernel core. */ static int trigger_sbr(struct hfi1_devdata *dd) { struct pci_dev *dev = dd->pcidev; struct pci_dev *pdev; /* need a parent */ if (!dev->bus->self) { dd_dev_err(dd, "%s: no parent device\n", __func__); return -ENOTTY; } /* should not be anyone else on the bus */ list_for_each_entry(pdev, &dev->bus->devices, bus_list) if (pdev != dev) { dd_dev_err(dd, "%s: another device is on the same bus\n", __func__); return -ENOTTY; } /* * This is an end around to do an SBR during probe time. A new API needs * to be implemented to have cleaner interface but this fixes the * current brokenness */ return pci_bridge_secondary_bus_reset(dev->bus->self); } /* * Write the given gasket interrupt register. */ static void write_gasket_interrupt(struct hfi1_devdata *dd, int index, u16 code, u16 data) { write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8), (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) | ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT))); } /* * Tell the gasket logic how to react to the reset. */ static void arm_gasket_logic(struct hfi1_devdata *dd) { u64 reg; reg = (((u64)1 << dd->hfi1_id) << ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) | ((u64)pcie_serdes_broadcast[dd->hfi1_id] << ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT | ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK | ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) << ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT); write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg); /* read back to push the write */ read_csr(dd, ASIC_PCIE_SD_HOST_CMD); } /* * CCE_PCIE_CTRL long name helpers * We redefine these shorter macros to use in the code while leaving * chip_registers.h to be autogenerated from the hardware spec. */ #define LANE_BUNDLE_MASK CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK #define LANE_BUNDLE_SHIFT CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT #define LANE_DELAY_MASK CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK #define LANE_DELAY_SHIFT CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT #define MARGIN_OVERWRITE_ENABLE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT #define MARGIN_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_SHIFT #define MARGIN_G1_G2_OVERWRITE_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK #define MARGIN_G1_G2_OVERWRITE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT #define MARGIN_GEN1_GEN2_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK #define MARGIN_GEN1_GEN2_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT /* * Write xmt_margin for full-swing (WFR-B) or half-swing (WFR-C). */ static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname) { u64 pcie_ctrl; u64 xmt_margin; u64 xmt_margin_oe; u64 lane_delay; u64 lane_bundle; pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL); /* * For Discrete, use full-swing. * - PCIe TX defaults to full-swing. * Leave this register as default. * For Integrated, use half-swing * - Copy xmt_margin and xmt_margin_oe * from Gen1/Gen2 to Gen3. 
*/ if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) { /* integrated */ /* extract initial fields */ xmt_margin = (pcie_ctrl >> MARGIN_GEN1_GEN2_SHIFT) & MARGIN_GEN1_GEN2_MASK; xmt_margin_oe = (pcie_ctrl >> MARGIN_G1_G2_OVERWRITE_SHIFT) & MARGIN_G1_G2_OVERWRITE_MASK; lane_delay = (pcie_ctrl >> LANE_DELAY_SHIFT) & LANE_DELAY_MASK; lane_bundle = (pcie_ctrl >> LANE_BUNDLE_SHIFT) & LANE_BUNDLE_MASK; /* * For A0, EFUSE values are not set. Override with the * correct values. */ if (is_ax(dd)) { /* * xmt_margin and OverwiteEnabel should be the * same for Gen1/Gen2 and Gen3 */ xmt_margin = 0x5; xmt_margin_oe = 0x1; lane_delay = 0xF; /* Delay 240ns. */ lane_bundle = 0x0; /* Set to 1 lane. */ } /* overwrite existing values */ pcie_ctrl = (xmt_margin << MARGIN_GEN1_GEN2_SHIFT) | (xmt_margin_oe << MARGIN_G1_G2_OVERWRITE_SHIFT) | (xmt_margin << MARGIN_SHIFT) | (xmt_margin_oe << MARGIN_OVERWRITE_ENABLE_SHIFT) | (lane_delay << LANE_DELAY_SHIFT) | (lane_bundle << LANE_BUNDLE_SHIFT); write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl); } dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n", fname, pcie_ctrl); } /* * Do all the steps needed to transition the PCIe link to Gen3 speed. */ int do_pcie_gen3_transition(struct hfi1_devdata *dd) { struct pci_dev *parent = dd->pcidev->bus->self; u64 fw_ctrl; u64 reg, therm; u32 reg32, fs, lf; u32 status, err; int ret; int do_retry, retry_count = 0; int intnum = 0; uint default_pset; uint pset = pcie_pset; u16 target_vector, target_speed; u16 lnkctl2, vendor; u8 div; const u8 (*eq)[3]; const u8 (*ctle_tunings)[4]; uint static_ctle_mode; int return_error = 0; u32 target_width; /* PCIe Gen3 is for the ASIC only */ if (dd->icode != ICODE_RTL_SILICON) return 0; if (pcie_target == 1) { /* target Gen1 */ target_vector = PCI_EXP_LNKCTL2_TLS_2_5GT; target_speed = 2500; } else if (pcie_target == 2) { /* target Gen2 */ target_vector = PCI_EXP_LNKCTL2_TLS_5_0GT; target_speed = 5000; } else if (pcie_target == 3) { /* target Gen3 */ target_vector = PCI_EXP_LNKCTL2_TLS_8_0GT; target_speed = 8000; } else { /* off or invalid target - skip */ dd_dev_info(dd, "%s: Skipping PCIe transition\n", __func__); return 0; } /* if already at target speed, done (unless forced) */ if (dd->lbus_speed == target_speed) { dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__, pcie_target, pcie_force ? "re-doing anyway" : "skipping"); if (!pcie_force) return 0; } /* * The driver cannot do the transition if it has no access to the * upstream component */ if (!parent) { dd_dev_info(dd, "%s: No upstream, Can't do gen3 transition\n", __func__); return 0; } /* Previous Gen1/Gen2 bus width */ target_width = dd->lbus_width; /* * Do the Gen3 transition. Steps are those of the PCIe Gen3 * recipe. 
*/ /* step 1: pcie link working in gen1/gen2 */ /* step 2: if either side is not capable of Gen3, done */ if (pcie_target == 3 && !dd->link_gen3_capable) { dd_dev_err(dd, "The PCIe link is not Gen3 capable\n"); ret = -ENOSYS; goto done_no_mutex; } /* hold the SBus resource across the firmware download and SBR */ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); if (ret) { dd_dev_err(dd, "%s: unable to acquire SBus resource\n", __func__); return ret; } /* make sure thermal polling is not causing interrupts */ therm = read_csr(dd, ASIC_CFG_THERM_POLL_EN); if (therm) { write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0); msleep(100); dd_dev_info(dd, "%s: Disabled therm polling\n", __func__); } retry: /* the SBus download will reset the spico for thermal */ /* step 3: download SBus Master firmware */ /* step 4: download PCIe Gen3 SerDes firmware */ dd_dev_info(dd, "%s: downloading firmware\n", __func__); ret = load_pcie_firmware(dd); if (ret) { /* do not proceed if the firmware cannot be downloaded */ return_error = 1; goto done; } /* step 5: set up device parameter settings */ dd_dev_info(dd, "%s: setting PCIe registers\n", __func__); /* * PcieCfgSpcie1 - Link Control 3 * Leave at reset value. No need to set PerfEq - link equalization * will be performed automatically after the SBR when the target * speed is 8GT/s. */ /* clear all 16 per-lane error bits (PCIe: Lane Error Status) */ pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, 0xffff); /* step 5a: Set Synopsys Port Logic registers */ /* * PcieCfgRegPl2 - Port Force Link * * Set the low power field to 0x10 to avoid unnecessary power * management messages. All other fields are zero. */ reg32 = 0x10ul << PCIE_CFG_REG_PL2_LOW_PWR_ENT_CNT_SHIFT; pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL2, reg32); /* * PcieCfgRegPl100 - Gen3 Control * * turn off PcieCfgRegPl100.Gen3ZRxDcNonCompl * turn on PcieCfgRegPl100.EqEieosCnt * Everything else zero. */ reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK; pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL100, reg32); /* * PcieCfgRegPl101 - Gen3 EQ FS and LF * PcieCfgRegPl102 - Gen3 EQ Presets to Coefficients Mapping * PcieCfgRegPl103 - Gen3 EQ Preset Index * PcieCfgRegPl105 - Gen3 EQ Status * * Give initial EQ settings. */ if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0) { /* discrete */ /* 1000mV, FS=24, LF = 8 */ fs = 24; lf = 8; div = 3; eq = discrete_preliminary_eq; default_pset = DEFAULT_DISCRETE_PSET; ctle_tunings = discrete_ctle_tunings; /* bit 0 - discrete on/off */ static_ctle_mode = pcie_ctle & 0x1; } else { /* 400mV, FS=29, LF = 9 */ fs = 29; lf = 9; div = 1; eq = integrated_preliminary_eq; default_pset = DEFAULT_MCP_PSET; ctle_tunings = integrated_ctle_tunings; /* bit 1 - integrated on/off */ static_ctle_mode = (pcie_ctle >> 1) & 0x1; } pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101, (fs << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) | (lf << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT)); ret = load_eq_table(dd, eq, fs, div); if (ret) goto done; /* * PcieCfgRegPl106 - Gen3 EQ Control * * Set Gen3EqPsetReqVec, leave other fields 0. 
*/ if (pset == UNSET_PSET) pset = default_pset; if (pset > 10) { /* valid range is 0-10, inclusive */ dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n", __func__, pset, default_pset); pset = default_pset; } dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pset); pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106, ((1 << pset) << PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) | PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK | PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK); /* * step 5b: Do post firmware download steps via SBus */ dd_dev_info(dd, "%s: doing pcie post steps\n", __func__); pcie_post_steps(dd); /* * step 5c: Program gasket interrupts */ /* set the Rx Bit Rate to REFCLK ratio */ write_gasket_interrupt(dd, intnum++, 0x0006, 0x0050); /* disable pCal for PCIe Gen3 RX equalization */ /* select adaptive or static CTLE */ write_gasket_interrupt(dd, intnum++, 0x0026, 0x5b01 | (static_ctle_mode << 3)); /* * Enable iCal for PCIe Gen3 RX equalization, and set which * evaluation of RX_EQ_EVAL will launch the iCal procedure. */ write_gasket_interrupt(dd, intnum++, 0x0026, 0x5202); if (static_ctle_mode) { /* apply static CTLE tunings */ u8 pcie_dc, pcie_lf, pcie_hf, pcie_bw; pcie_dc = ctle_tunings[pset][0]; pcie_lf = ctle_tunings[pset][1]; pcie_hf = ctle_tunings[pset][2]; pcie_bw = ctle_tunings[pset][3]; write_gasket_interrupt(dd, intnum++, 0x0026, 0x0200 | pcie_dc); write_gasket_interrupt(dd, intnum++, 0x0026, 0x0100 | pcie_lf); write_gasket_interrupt(dd, intnum++, 0x0026, 0x0000 | pcie_hf); write_gasket_interrupt(dd, intnum++, 0x0026, 0x5500 | pcie_bw); } /* terminate list */ write_gasket_interrupt(dd, intnum++, 0x0000, 0x0000); /* * step 5d: program XMT margin */ write_xmt_margin(dd, __func__); /* * step 5e: disable active state power management (ASPM). It * will be enabled if required later */ dd_dev_info(dd, "%s: clearing ASPM\n", __func__); aspm_hw_disable_l1(dd); /* * step 5f: clear DirectSpeedChange * PcieCfgRegPl67.DirectSpeedChange must be zero to prevent the * change in the speed target from starting before we are ready. * This field defaults to 0 and we are not changing it, so nothing * needs to be done. */ /* step 5g: Set target link speed */ /* * Set target link speed to be target on both device and parent. * On setting the parent: Some system BIOSs "helpfully" set the * parent target speed to Gen2 to match the ASIC's initial speed. * We can set the target Gen3 because we have already checked * that it is Gen3 capable earlier. 
*/ dd_dev_info(dd, "%s: setting parent target link speed\n", __func__); ret = pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2); if (ret) { dd_dev_err(dd, "Unable to read from PCI config\n"); return_error = 1; goto done; } dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__, (u32)lnkctl2); /* only write to parent if target is not as high as ours */ if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) { lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; lnkctl2 |= target_vector; dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__, (u32)lnkctl2); ret = pcie_capability_write_word(parent, PCI_EXP_LNKCTL2, lnkctl2); if (ret) { dd_dev_err(dd, "Unable to write to PCI config\n"); return_error = 1; goto done; } } else { dd_dev_info(dd, "%s: ..target speed is OK\n", __func__); } dd_dev_info(dd, "%s: setting target link speed\n", __func__); ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2); if (ret) { dd_dev_err(dd, "Unable to read from PCI config\n"); return_error = 1; goto done; } dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__, (u32)lnkctl2); lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; lnkctl2 |= target_vector; dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__, (u32)lnkctl2); ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2); if (ret) { dd_dev_err(dd, "Unable to write to PCI config\n"); return_error = 1; goto done; } /* step 5h: arm gasket logic */ /* hold DC in reset across the SBR */ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); (void)read_csr(dd, CCE_DC_CTRL); /* DC reset hold */ /* save firmware control across the SBR */ fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL); dd_dev_info(dd, "%s: arming gasket logic\n", __func__); arm_gasket_logic(dd); /* * step 6: quiesce PCIe link * The chip has already been reset, so there will be no traffic * from the chip. Linux has no easy way to enforce that it will * not try to access the device, so we just need to hope it doesn't * do it while we are doing the reset. */ /* * step 7: initiate the secondary bus reset (SBR) * step 8: hardware brings the links back up * step 9: wait for link speed transition to be complete */ dd_dev_info(dd, "%s: calling trigger_sbr\n", __func__); ret = trigger_sbr(dd); if (ret) goto done; /* step 10: decide what to do next */ /* check if we can read PCI space */ ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor); if (ret) { dd_dev_info(dd, "%s: read of VendorID failed after SBR, err %d\n", __func__, ret); return_error = 1; goto done; } if (vendor == 0xffff) { dd_dev_info(dd, "%s: VendorID is all 1s after SBR\n", __func__); return_error = 1; ret = -EIO; goto done; } /* restore PCI space registers we know were reset */ dd_dev_info(dd, "%s: calling restore_pci_variables\n", __func__); ret = restore_pci_variables(dd); if (ret) { dd_dev_err(dd, "%s: Could not restore PCI variables\n", __func__); return_error = 1; goto done; } /* restore firmware control */ write_csr(dd, MISC_CFG_FW_CTRL, fw_ctrl); /* * Check the gasket block status. * * This is the first CSR read after the SBR. If the read returns * all 1s (fails), the link did not make it back. * * Once we're sure we can read and write, clear the DC reset after * the SBR. Then check for any per-lane errors. Then look over * the status. 
*/ reg = read_csr(dd, ASIC_PCIE_SD_HOST_STATUS); dd_dev_info(dd, "%s: gasket block status: 0x%llx\n", __func__, reg); if (reg == ~0ull) { /* PCIe read failed/timeout */ dd_dev_err(dd, "SBR failed - unable to read from device\n"); return_error = 1; ret = -ENOSYS; goto done; } /* clear the DC reset */ write_csr(dd, CCE_DC_CTRL, 0); /* Set the LED off */ setextled(dd, 0); /* check for any per-lane errors */ ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, &reg32); if (ret) { dd_dev_err(dd, "Unable to read from PCI config\n"); return_error = 1; goto done; } dd_dev_info(dd, "%s: per-lane errors: 0x%x\n", __func__, reg32); /* extract status, look for our HFI */ status = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_SHIFT) & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK; if ((status & (1 << dd->hfi1_id)) == 0) { dd_dev_err(dd, "%s: gasket status 0x%x, expecting 0x%x\n", __func__, status, 1 << dd->hfi1_id); ret = -EIO; goto done; } /* extract error */ err = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_SHIFT) & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_MASK; if (err) { dd_dev_err(dd, "%s: gasket error %d\n", __func__, err); ret = -EIO; goto done; } /* update our link information cache */ update_lbus_info(dd); dd_dev_info(dd, "%s: new speed and width: %s\n", __func__, dd->lbus_info); if (dd->lbus_speed != target_speed || dd->lbus_width < target_width) { /* not target */ /* maybe retry */ do_retry = retry_count < pcie_retry; dd_dev_err(dd, "PCIe link speed or width did not match target%s\n", do_retry ? ", retrying" : ""); retry_count++; if (do_retry) { msleep(100); /* allow time to settle */ goto retry; } ret = -EIO; } done: if (therm) { write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1); msleep(100); dd_dev_info(dd, "%s: Re-enable therm polling\n", __func__); } release_chip_resource(dd, CR_SBUS); done_no_mutex: /* return no error if it is OK to be at current speed */ if (ret && !return_error) { dd_dev_err(dd, "Proceeding at current speed PCIe speed\n"); ret = 0; } dd_dev_info(dd, "%s: done\n", __func__); return ret; }
linux-master
drivers/infiniband/hw/hfi1/pcie.c
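tune_pcie_caps() above interprets the pcie_caps module parameter as two 3-bit fields, each encoding a byte limit as 128 << code, and uses ffs() - 8 to convert a current MPS value back into a code. The standalone sketch below decodes a sample parameter the same way; the 0x51 value and the demo names are illustrative, not driver code.

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int code_to_bytes(unsigned int code)
{
	return 128u << code;	/* 0 -> 128, 1 -> 256, ... 5 -> 4096 */
}

static unsigned int bytes_to_code(unsigned int bytes)
{
	return ffs(bytes) - 8;	/* inverse mapping applied to pcie_get_mps() */
}

int main(void)
{
	unsigned int pcie_caps = 0x51;	/* example: MRRS code 5, MPS code 1 */
	unsigned int mps_code  = pcie_caps & 7;		/* payload cap */
	unsigned int mrrs_code = (pcie_caps >> 4) & 7;	/* read request cap */

	printf("MaxPayload capped at %u bytes, MaxReadReq capped at %u bytes\n",
	       code_to_bytes(mps_code), code_to_bytes(mrrs_code));
	printf("an MPS of 256 bytes maps back to code %u\n",
	       bytes_to_code(256));
	return 0;
}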
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2020 Cornelis Networks, Inc. * Copyright(c) 2015-2020 Intel Corporation. */ #include <linux/poll.h> #include <linux/cdev.h> #include <linux/vmalloc.h> #include <linux/io.h> #include <linux/sched/mm.h> #include <linux/bitmap.h> #include <rdma/ib.h> #include "hfi.h" #include "pio.h" #include "device.h" #include "common.h" #include "trace.h" #include "mmu_rb.h" #include "user_sdma.h" #include "user_exp_rcv.h" #include "aspm.h" #undef pr_fmt #define pr_fmt(fmt) DRIVER_NAME ": " fmt #define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */ /* * File operation functions */ static int hfi1_file_open(struct inode *inode, struct file *fp); static int hfi1_file_close(struct inode *inode, struct file *fp); static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from); static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt); static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma); static u64 kvirt_to_phys(void *addr); static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len); static void init_subctxts(struct hfi1_ctxtdata *uctxt, const struct hfi1_user_info *uinfo); static int init_user_ctxt(struct hfi1_filedata *fd, struct hfi1_ctxtdata *uctxt); static void user_init(struct hfi1_ctxtdata *uctxt); static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len); static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len); static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg, u32 len); static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg, u32 len); static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg, u32 len); static int setup_base_ctxt(struct hfi1_filedata *fd, struct hfi1_ctxtdata *uctxt); static int setup_subctxt(struct hfi1_ctxtdata *uctxt); static int find_sub_ctxt(struct hfi1_filedata *fd, const struct hfi1_user_info *uinfo); static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd, struct hfi1_user_info *uinfo, struct hfi1_ctxtdata **cd); static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt); static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt); static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt); static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt, unsigned long arg); static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg); static int ctxt_reset(struct hfi1_ctxtdata *uctxt); static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt, unsigned long arg); static vm_fault_t vma_fault(struct vm_fault *vmf); static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); static const struct file_operations hfi1_file_ops = { .owner = THIS_MODULE, .write_iter = hfi1_write_iter, .open = hfi1_file_open, .release = hfi1_file_close, .unlocked_ioctl = hfi1_file_ioctl, .poll = hfi1_poll, .mmap = hfi1_file_mmap, .llseek = noop_llseek, }; static const struct vm_operations_struct vm_ops = { .fault = vma_fault, }; /* * Types of memories mapped into user processes' space */ enum mmap_types { PIO_BUFS = 1, PIO_BUFS_SOP, PIO_CRED, RCV_HDRQ, RCV_EGRBUF, UREGS, EVENTS, STATUS, RTAIL, SUBCTXT_UREGS, SUBCTXT_RCV_HDRQ, SUBCTXT_EGRBUF, SDMA_COMP }; /* * Masks and offsets defining the mmap tokens */ #define HFI1_MMAP_OFFSET_MASK 0xfffULL #define HFI1_MMAP_OFFSET_SHIFT 0 #define HFI1_MMAP_SUBCTXT_MASK 0xfULL #define HFI1_MMAP_SUBCTXT_SHIFT 12 #define HFI1_MMAP_CTXT_MASK 0xffULL #define 
HFI1_MMAP_CTXT_SHIFT 16 #define HFI1_MMAP_TYPE_MASK 0xfULL #define HFI1_MMAP_TYPE_SHIFT 24 #define HFI1_MMAP_MAGIC_MASK 0xffffffffULL #define HFI1_MMAP_MAGIC_SHIFT 32 #define HFI1_MMAP_MAGIC 0xdabbad00 #define HFI1_MMAP_TOKEN_SET(field, val) \ (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT) #define HFI1_MMAP_TOKEN_GET(field, token) \ (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK) #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \ (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \ HFI1_MMAP_TOKEN_SET(TYPE, type) | \ HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \ HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \ HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr)))) #define dbg(fmt, ...) \ pr_info(fmt, ##__VA_ARGS__) static inline int is_valid_mmap(u64 token) { return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC); } static int hfi1_file_open(struct inode *inode, struct file *fp) { struct hfi1_filedata *fd; struct hfi1_devdata *dd = container_of(inode->i_cdev, struct hfi1_devdata, user_cdev); if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1)) return -EINVAL; if (!refcount_inc_not_zero(&dd->user_refcount)) return -ENXIO; /* The real work is performed later in assign_ctxt() */ fd = kzalloc(sizeof(*fd), GFP_KERNEL); if (!fd || init_srcu_struct(&fd->pq_srcu)) goto nomem; spin_lock_init(&fd->pq_rcu_lock); spin_lock_init(&fd->tid_lock); spin_lock_init(&fd->invalid_lock); fd->rec_cpu_num = -1; /* no cpu affinity by default */ fd->dd = dd; fp->private_data = fd; return 0; nomem: kfree(fd); fp->private_data = NULL; if (refcount_dec_and_test(&dd->user_refcount)) complete(&dd->user_comp); return -ENOMEM; } static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; int ret = 0; int uval = 0; hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd); if (cmd != HFI1_IOCTL_ASSIGN_CTXT && cmd != HFI1_IOCTL_GET_VERS && !uctxt) return -EINVAL; switch (cmd) { case HFI1_IOCTL_ASSIGN_CTXT: ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd)); break; case HFI1_IOCTL_CTXT_INFO: ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd)); break; case HFI1_IOCTL_USER_INFO: ret = get_base_info(fd, arg, _IOC_SIZE(cmd)); break; case HFI1_IOCTL_CREDIT_UPD: if (uctxt) sc_return_credits(uctxt->sc); break; case HFI1_IOCTL_TID_UPDATE: ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd)); break; case HFI1_IOCTL_TID_FREE: ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd)); break; case HFI1_IOCTL_TID_INVAL_READ: ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd)); break; case HFI1_IOCTL_RECV_CTRL: ret = manage_rcvq(uctxt, fd->subctxt, arg); break; case HFI1_IOCTL_POLL_TYPE: if (get_user(uval, (int __user *)arg)) return -EFAULT; uctxt->poll_type = (typeof(uctxt->poll_type))uval; break; case HFI1_IOCTL_ACK_EVENT: ret = user_event_ack(uctxt, fd->subctxt, arg); break; case HFI1_IOCTL_SET_PKEY: ret = set_ctxt_pkey(uctxt, arg); break; case HFI1_IOCTL_CTXT_RESET: ret = ctxt_reset(uctxt); break; case HFI1_IOCTL_GET_VERS: uval = HFI1_USER_SWVERSION; if (put_user(uval, (int __user *)arg)) return -EFAULT; break; default: return -EINVAL; } return ret; } static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from) { struct hfi1_filedata *fd = kiocb->ki_filp->private_data; struct hfi1_user_sdma_pkt_q *pq; struct hfi1_user_sdma_comp_q *cq = fd->cq; int done = 0, reqs = 0; unsigned long dim = from->nr_segs; int idx; if (!HFI1_CAP_IS_KSET(SDMA)) return -EINVAL; if (!from->user_backed) return -EINVAL; idx = srcu_read_lock(&fd->pq_srcu); 
pq = srcu_dereference(fd->pq, &fd->pq_srcu); if (!cq || !pq) { srcu_read_unlock(&fd->pq_srcu, idx); return -EIO; } trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) { srcu_read_unlock(&fd->pq_srcu, idx); return -ENOSPC; } while (dim) { const struct iovec *iov = iter_iov(from); int ret; unsigned long count = 0; ret = hfi1_user_sdma_process_request( fd, (struct iovec *)(iov + done), dim, &count); if (ret) { reqs = ret; break; } dim -= count; done += count; reqs++; } srcu_read_unlock(&fd->pq_srcu, idx); return reqs; } static inline void mmap_cdbg(u16 ctxt, u8 subctxt, u8 type, u8 mapio, u8 vmf, u64 memaddr, void *memvirt, dma_addr_t memdma, ssize_t memlen, struct vm_area_struct *vma) { hfi1_cdbg(PROC, "%u:%u type:%u io/vf/dma:%d/%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx", ctxt, subctxt, type, mapio, vmf, !!memdma, memaddr ?: (u64)memvirt, memlen, vma->vm_end - vma->vm_start, vma->vm_flags); } static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) { struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd; unsigned long flags; u64 token = vma->vm_pgoff << PAGE_SHIFT, memaddr = 0; void *memvirt = NULL; dma_addr_t memdma = 0; u8 subctxt, mapio = 0, vmf = 0, type; ssize_t memlen = 0; int ret = 0; u16 ctxt; if (!is_valid_mmap(token) || !uctxt || !(vma->vm_flags & VM_SHARED)) { ret = -EINVAL; goto done; } dd = uctxt->dd; ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token); subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token); type = HFI1_MMAP_TOKEN_GET(TYPE, token); if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) { ret = -EINVAL; goto done; } /* * vm_pgoff is used as a buffer selector cookie. Always mmap from * the beginning. */ vma->vm_pgoff = 0; flags = vma->vm_flags; switch (type) { case PIO_BUFS: case PIO_BUFS_SOP: memaddr = ((dd->physaddr + TXE_PIO_SEND) + /* chip pio base */ (uctxt->sc->hw_context * BIT(16))) + /* 64K PIO space / ctxt */ (type == PIO_BUFS_SOP ? (TXE_PIO_SIZE / 2) : 0); /* sop? */ /* * Map only the amount allocated to the context, not the * entire available context's PIO space. */ memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE); flags &= ~VM_MAYREAD; flags |= VM_DONTCOPY | VM_DONTEXPAND; vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); mapio = 1; break; case PIO_CRED: { u64 cr_page_offset; if (flags & VM_WRITE) { ret = -EPERM; goto done; } /* * The credit return location for this context could be on the * second or third page allocated for credit returns (if number * of enabled contexts > 64 and 128 respectively). */ cr_page_offset = ((u64)uctxt->sc->hw_free - (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK; memvirt = dd->cr_base[uctxt->numa_id].va + cr_page_offset; memdma = dd->cr_base[uctxt->numa_id].dma + cr_page_offset; memlen = PAGE_SIZE; flags &= ~VM_MAYWRITE; flags |= VM_DONTCOPY | VM_DONTEXPAND; /* * The driver has already allocated memory for credit * returns and programmed it into the chip. Has that * memory been flagged as non-cached? */ /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */ break; } case RCV_HDRQ: memlen = rcvhdrq_size(uctxt); memvirt = uctxt->rcvhdrq; memdma = uctxt->rcvhdrq_dma; break; case RCV_EGRBUF: { unsigned long vm_start_save; unsigned long vm_end_save; int i; /* * The RcvEgr buffer need to be handled differently * as multiple non-contiguous pages need to be mapped * into the user process. 
*/ memlen = uctxt->egrbufs.size; if ((vma->vm_end - vma->vm_start) != memlen) { dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n", (vma->vm_end - vma->vm_start), memlen); ret = -EINVAL; goto done; } if (vma->vm_flags & VM_WRITE) { ret = -EPERM; goto done; } vm_flags_clear(vma, VM_MAYWRITE); /* * Mmap multiple separate allocations into a single vma. From * here, dma_mmap_coherent() calls dma_direct_mmap(), which * requires the mmap to exactly fill the vma starting at * vma_start. Adjust the vma start and end for each eager * buffer segment mapped. Restore the originals when done. */ vm_start_save = vma->vm_start; vm_end_save = vma->vm_end; vma->vm_end = vma->vm_start; for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) { memlen = uctxt->egrbufs.buffers[i].len; memvirt = uctxt->egrbufs.buffers[i].addr; memdma = uctxt->egrbufs.buffers[i].dma; vma->vm_end += memlen; mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr, memvirt, memdma, memlen, vma); ret = dma_mmap_coherent(&dd->pcidev->dev, vma, memvirt, memdma, memlen); if (ret < 0) { vma->vm_start = vm_start_save; vma->vm_end = vm_end_save; goto done; } vma->vm_start += memlen; } vma->vm_start = vm_start_save; vma->vm_end = vm_end_save; ret = 0; goto done; } case UREGS: /* * Map only the page that contains this context's user * registers. */ memaddr = (unsigned long) (dd->physaddr + RXE_PER_CONTEXT_USER) + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE); /* * TidFlow table is on the same page as the rest of the * user registers. */ memlen = PAGE_SIZE; flags |= VM_DONTCOPY | VM_DONTEXPAND; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); mapio = 1; break; case EVENTS: /* * Use the page where this context's flags are. User level * knows where it's own bitmap is within the page. */ memaddr = (unsigned long) (dd->events + uctxt_offset(uctxt)) & PAGE_MASK; memlen = PAGE_SIZE; /* * v3.7 removes VM_RESERVED but the effect is kept by * using VM_IO. 
*/ flags |= VM_IO | VM_DONTEXPAND; vmf = 1; break; case STATUS: if (flags & VM_WRITE) { ret = -EPERM; goto done; } memaddr = kvirt_to_phys((void *)dd->status); memlen = PAGE_SIZE; flags |= VM_IO | VM_DONTEXPAND; break; case RTAIL: if (!HFI1_CAP_IS_USET(DMA_RTAIL)) { /* * If the memory allocation failed, the context alloc * also would have failed, so we would never get here */ ret = -EINVAL; goto done; } if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) { ret = -EPERM; goto done; } memlen = PAGE_SIZE; memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt); memdma = uctxt->rcvhdrqtailaddr_dma; flags &= ~VM_MAYWRITE; break; case SUBCTXT_UREGS: memaddr = (u64)uctxt->subctxt_uregbase; memlen = PAGE_SIZE; flags |= VM_IO | VM_DONTEXPAND; vmf = 1; break; case SUBCTXT_RCV_HDRQ: memaddr = (u64)uctxt->subctxt_rcvhdr_base; memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt; flags |= VM_IO | VM_DONTEXPAND; vmf = 1; break; case SUBCTXT_EGRBUF: memaddr = (u64)uctxt->subctxt_rcvegrbuf; memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt; flags |= VM_IO | VM_DONTEXPAND; flags &= ~VM_MAYWRITE; vmf = 1; break; case SDMA_COMP: { struct hfi1_user_sdma_comp_q *cq = fd->cq; if (!cq) { ret = -EFAULT; goto done; } memaddr = (u64)cq->comps; memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries); flags |= VM_IO | VM_DONTEXPAND; vmf = 1; break; } default: ret = -EINVAL; break; } if ((vma->vm_end - vma->vm_start) != memlen) { hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu", uctxt->ctxt, fd->subctxt, (vma->vm_end - vma->vm_start), memlen); ret = -EINVAL; goto done; } vm_flags_reset(vma, flags); mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr, memvirt, memdma, memlen, vma); if (vmf) { vma->vm_pgoff = PFN_DOWN(memaddr); vma->vm_ops = &vm_ops; ret = 0; } else if (memdma) { ret = dma_mmap_coherent(&dd->pcidev->dev, vma, memvirt, memdma, memlen); } else if (mapio) { ret = io_remap_pfn_range(vma, vma->vm_start, PFN_DOWN(memaddr), memlen, vma->vm_page_prot); } else if (memvirt) { ret = remap_pfn_range(vma, vma->vm_start, PFN_DOWN(__pa(memvirt)), memlen, vma->vm_page_prot); } else { ret = remap_pfn_range(vma, vma->vm_start, PFN_DOWN(memaddr), memlen, vma->vm_page_prot); } done: return ret; } /* * Local (non-chip) user memory is not mapped right away but as it is * accessed by the user-level code. 
*/ static vm_fault_t vma_fault(struct vm_fault *vmf) { struct page *page; page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT)); if (!page) return VM_FAULT_SIGBUS; get_page(page); vmf->page = page; return 0; } static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt) { struct hfi1_ctxtdata *uctxt; __poll_t pollflag; uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt; if (!uctxt) pollflag = EPOLLERR; else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT) pollflag = poll_urgent(fp, pt); else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV) pollflag = poll_next(fp, pt); else /* invalid */ pollflag = EPOLLERR; return pollflag; } static int hfi1_file_close(struct inode *inode, struct file *fp) { struct hfi1_filedata *fdata = fp->private_data; struct hfi1_ctxtdata *uctxt = fdata->uctxt; struct hfi1_devdata *dd = container_of(inode->i_cdev, struct hfi1_devdata, user_cdev); unsigned long flags, *ev; fp->private_data = NULL; if (!uctxt) goto done; hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); flush_wc(); /* drain user sdma queue */ hfi1_user_sdma_free_queues(fdata, uctxt); /* release the cpu */ hfi1_put_proc_affinity(fdata->rec_cpu_num); /* clean up rcv side */ hfi1_user_exp_rcv_free(fdata); /* * fdata->uctxt is used in the above cleanup. It is not ready to be * removed until here. */ fdata->uctxt = NULL; hfi1_rcd_put(uctxt); /* * Clear any left over, unhandled events so the next process that * gets this context doesn't get confused. */ ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt; *ev = 0; spin_lock_irqsave(&dd->uctxt_lock, flags); __clear_bit(fdata->subctxt, uctxt->in_use_ctxts); if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) { spin_unlock_irqrestore(&dd->uctxt_lock, flags); goto done; } spin_unlock_irqrestore(&dd->uctxt_lock, flags); /* * Disable receive context and interrupt available, reset all * RcvCtxtCtrl bits to default values. */ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_TIDFLOW_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS | HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_ONE_PKT_EGR_DIS | HFI1_RCVCTRL_NO_RHQ_DROP_DIS | HFI1_RCVCTRL_NO_EGR_DROP_DIS | HFI1_RCVCTRL_URGENT_DIS, uctxt); /* Clear the context's J_KEY */ hfi1_clear_ctxt_jkey(dd, uctxt); /* * If a send context is allocated, reset context integrity * checks to default and disable the send context. */ if (uctxt->sc) { sc_disable(uctxt->sc); set_pio_integrity(uctxt->sc); } hfi1_free_ctxt_rcv_groups(uctxt); hfi1_clear_ctxt_pkey(dd, uctxt); uctxt->event_flags = 0; deallocate_ctxt(uctxt); done: if (refcount_dec_and_test(&dd->user_refcount)) complete(&dd->user_comp); cleanup_srcu_struct(&fdata->pq_srcu); kfree(fdata); return 0; } /* * Convert kernel *virtual* addresses to physical addresses. * This is used to vmalloc'ed addresses. */ static u64 kvirt_to_phys(void *addr) { struct page *page; u64 paddr = 0; page = vmalloc_to_page(addr); if (page) paddr = page_to_pfn(page) << PAGE_SHIFT; return paddr; } /** * complete_subctxt - complete sub-context info * @fd: valid filedata pointer * * Sub-context info can only be set up after the base context * has been completed. This is indicated by the clearing of the * HFI1_CTXT_BASE_UINIT bit. * * Wait for the bit to be cleared, and then complete the subcontext * initialization. * */ static int complete_subctxt(struct hfi1_filedata *fd) { int ret; unsigned long flags; /* * sub-context info can only be set up after the base context * has been completed. 
*/ ret = wait_event_interruptible( fd->uctxt->wait, !test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags)); if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags)) ret = -ENOMEM; /* Finish the sub-context init */ if (!ret) { fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id); ret = init_user_ctxt(fd, fd->uctxt); } if (ret) { spin_lock_irqsave(&fd->dd->uctxt_lock, flags); __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); hfi1_rcd_put(fd->uctxt); fd->uctxt = NULL; } return ret; } static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len) { int ret; unsigned int swmajor; struct hfi1_ctxtdata *uctxt = NULL; struct hfi1_user_info uinfo; if (fd->uctxt) return -EINVAL; if (sizeof(uinfo) != len) return -EINVAL; if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo))) return -EFAULT; swmajor = uinfo.userversion >> 16; if (swmajor != HFI1_USER_SWMAJOR) return -ENODEV; if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS) return -EINVAL; /* * Acquire the mutex to protect against multiple creations of what * could be a shared base context. */ mutex_lock(&hfi1_mutex); /* * Get a sub context if available (fd->uctxt will be set). * ret < 0 error, 0 no context, 1 sub-context found */ ret = find_sub_ctxt(fd, &uinfo); /* * Allocate a base context if context sharing is not required or a * sub context wasn't found. */ if (!ret) ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt); mutex_unlock(&hfi1_mutex); /* Depending on the context type, finish the appropriate init */ switch (ret) { case 0: ret = setup_base_ctxt(fd, uctxt); if (ret) deallocate_ctxt(uctxt); break; case 1: ret = complete_subctxt(fd); break; default: break; } return ret; } /** * match_ctxt - match context * @fd: valid filedata pointer * @uinfo: user info to compare base context with * @uctxt: context to compare uinfo to. * * Compare the given context with the given information to see if it * can be used for a sub context. */ static int match_ctxt(struct hfi1_filedata *fd, const struct hfi1_user_info *uinfo, struct hfi1_ctxtdata *uctxt) { struct hfi1_devdata *dd = fd->dd; unsigned long flags; u16 subctxt; /* Skip dynamically allocated kernel contexts */ if (uctxt->sc && (uctxt->sc->type == SC_KERNEL)) return 0; /* Skip ctxt if it doesn't match the requested one */ if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) || uctxt->jkey != generate_jkey(current_uid()) || uctxt->subctxt_id != uinfo->subctxt_id || uctxt->subctxt_cnt != uinfo->subctxt_cnt) return 0; /* Verify the sharing process matches the base */ if (uctxt->userversion != uinfo->userversion) return -EINVAL; /* Find an unused sub context */ spin_lock_irqsave(&dd->uctxt_lock, flags); if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) { /* context is being closed, do not use */ spin_unlock_irqrestore(&dd->uctxt_lock, flags); return 0; } subctxt = find_first_zero_bit(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS); if (subctxt >= uctxt->subctxt_cnt) { spin_unlock_irqrestore(&dd->uctxt_lock, flags); return -EBUSY; } fd->subctxt = subctxt; __set_bit(fd->subctxt, uctxt->in_use_ctxts); spin_unlock_irqrestore(&dd->uctxt_lock, flags); fd->uctxt = uctxt; hfi1_rcd_get(uctxt); return 1; } /** * find_sub_ctxt - fund sub-context * @fd: valid filedata pointer * @uinfo: matching info to use to find a possible context to share. * * The hfi1_mutex must be held when this function is called. It is * necessary to ensure serialized creation of shared contexts. 
* * Return: * 0 No sub-context found * 1 Subcontext found and allocated * errno EINVAL (incorrect parameters) * EBUSY (all sub contexts in use) */ static int find_sub_ctxt(struct hfi1_filedata *fd, const struct hfi1_user_info *uinfo) { struct hfi1_ctxtdata *uctxt; struct hfi1_devdata *dd = fd->dd; u16 i; int ret; if (!uinfo->subctxt_cnt) return 0; for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) { uctxt = hfi1_rcd_get_by_index(dd, i); if (uctxt) { ret = match_ctxt(fd, uinfo, uctxt); hfi1_rcd_put(uctxt); /* value of != 0 will return */ if (ret) return ret; } } return 0; } static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd, struct hfi1_user_info *uinfo, struct hfi1_ctxtdata **rcd) { struct hfi1_ctxtdata *uctxt; int ret, numa; if (dd->flags & HFI1_FROZEN) { /* * Pick an error that is unique from all other errors * that are returned so the user process knows that * it tried to allocate while the SPC was frozen. It * it should be able to retry with success in a short * while. */ return -EIO; } if (!dd->freectxts) return -EBUSY; /* * If we don't have a NUMA node requested, preference is towards * device NUMA node. */ fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node); if (fd->rec_cpu_num != -1) numa = cpu_to_node(fd->rec_cpu_num); else numa = numa_node_id(); ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt); if (ret < 0) { dd_dev_err(dd, "user ctxtdata allocation failed\n"); return ret; } hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)", uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num, uctxt->numa_id); /* * Allocate and enable a PIO send context. */ uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node); if (!uctxt->sc) { ret = -ENOMEM; goto ctxdata_free; } hfi1_cdbg(PROC, "allocated send context %u(%u)", uctxt->sc->sw_index, uctxt->sc->hw_context); ret = sc_enable(uctxt->sc); if (ret) goto ctxdata_free; /* * Setup sub context information if the user-level has requested * sub contexts. * This has to be done here so the rest of the sub-contexts find the * proper base context. * NOTE: _set_bit() can be used here because the context creation is * protected by the mutex (rather than the spin_lock), and will be the * very first instance of this context. 
*/ __set_bit(0, uctxt->in_use_ctxts); if (uinfo->subctxt_cnt) init_subctxts(uctxt, uinfo); uctxt->userversion = uinfo->userversion; uctxt->flags = hfi1_cap_mask; /* save current flag state */ init_waitqueue_head(&uctxt->wait); strscpy(uctxt->comm, current->comm, sizeof(uctxt->comm)); memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)); uctxt->jkey = generate_jkey(current_uid()); hfi1_stats.sps_ctxts++; /* * Disable ASPM when there are open user/PSM contexts to avoid * issues with ASPM L1 exit latency */ if (dd->freectxts-- == dd->num_user_contexts) aspm_disable_all(dd); *rcd = uctxt; return 0; ctxdata_free: hfi1_free_ctxt(uctxt); return ret; } static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt) { mutex_lock(&hfi1_mutex); hfi1_stats.sps_ctxts--; if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts) aspm_enable_all(uctxt->dd); mutex_unlock(&hfi1_mutex); hfi1_free_ctxt(uctxt); } static void init_subctxts(struct hfi1_ctxtdata *uctxt, const struct hfi1_user_info *uinfo) { uctxt->subctxt_cnt = uinfo->subctxt_cnt; uctxt->subctxt_id = uinfo->subctxt_id; set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); } static int setup_subctxt(struct hfi1_ctxtdata *uctxt) { int ret = 0; u16 num_subctxts = uctxt->subctxt_cnt; uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE); if (!uctxt->subctxt_uregbase) return -ENOMEM; /* We can take the size of the RcvHdr Queue from the master */ uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) * num_subctxts); if (!uctxt->subctxt_rcvhdr_base) { ret = -ENOMEM; goto bail_ureg; } uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size * num_subctxts); if (!uctxt->subctxt_rcvegrbuf) { ret = -ENOMEM; goto bail_rhdr; } return 0; bail_rhdr: vfree(uctxt->subctxt_rcvhdr_base); uctxt->subctxt_rcvhdr_base = NULL; bail_ureg: vfree(uctxt->subctxt_uregbase); uctxt->subctxt_uregbase = NULL; return ret; } static void user_init(struct hfi1_ctxtdata *uctxt) { unsigned int rcvctrl_ops = 0; /* initialize poll variables... */ uctxt->urgent = 0; uctxt->urgent_poll = 0; /* * Now enable the ctxt for receive. * For chips that are set to DMA the tail register to memory * when they change (and when the update bit transitions from * 0 to 1. So for those chips, we turn it off and then back on. * This will (very briefly) affect any other open ctxts, but the * duration is very short, and therefore isn't an issue. We * explicitly set the in-memory tail copy to 0 beforehand, so we * don't have to wait to be sure the DMA update has happened * (chip resets head/tail to 0 on transition to enable). */ if (hfi1_rcvhdrtail_kvaddr(uctxt)) clear_rcvhdrtail(uctxt); /* Setup J_KEY before enabling the context */ hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey); rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB; rcvctrl_ops |= HFI1_RCVCTRL_URGENT_ENB; if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP)) rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB; /* * Ignore the bit in the flags for now until proper * support for multiple packet per rcv array entry is * added. */ if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR)) rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB; if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL)) rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB; if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL)) rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB; /* * The RcvCtxtCtrl.TailUpd bit has to be explicitly written. * We can't rely on the correct value to be set from prior * uses of the chip or ctxt. Therefore, add the rcvctrl op * for both cases. 
*/ if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL)) rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB; else rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS; hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt); } static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len) { struct hfi1_ctxt_info cinfo; struct hfi1_ctxtdata *uctxt = fd->uctxt; if (sizeof(cinfo) != len) return -EINVAL; memset(&cinfo, 0, sizeof(cinfo)); cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) & HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) | HFI1_CAP_UGET_MASK(uctxt->flags, MASK) | HFI1_CAP_KGET_MASK(uctxt->flags, K2U); /* adjust flag if this fd is not able to cache */ if (!fd->use_mn) cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */ cinfo.num_active = hfi1_count_active_units(); cinfo.unit = uctxt->dd->unit; cinfo.ctxt = uctxt->ctxt; cinfo.subctxt = fd->subctxt; cinfo.rcvtids = roundup(uctxt->egrbufs.alloced, uctxt->dd->rcv_entries.group_size) + uctxt->expected_count; cinfo.credits = uctxt->sc->credits; cinfo.numa_node = uctxt->numa_id; cinfo.rec_cpu = fd->rec_cpu_num; cinfo.send_ctxt = uctxt->sc->hw_context; cinfo.egrtids = uctxt->egrbufs.alloced; cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt); cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2; cinfo.sdma_ring_size = fd->cq->nentries; cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size; trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo); if (copy_to_user((void __user *)arg, &cinfo, len)) return -EFAULT; return 0; } static int init_user_ctxt(struct hfi1_filedata *fd, struct hfi1_ctxtdata *uctxt) { int ret; ret = hfi1_user_sdma_alloc_queues(uctxt, fd); if (ret) return ret; ret = hfi1_user_exp_rcv_init(fd, uctxt); if (ret) hfi1_user_sdma_free_queues(fd, uctxt); return ret; } static int setup_base_ctxt(struct hfi1_filedata *fd, struct hfi1_ctxtdata *uctxt) { struct hfi1_devdata *dd = uctxt->dd; int ret = 0; hfi1_init_ctxt(uctxt->sc); /* Now allocate the RcvHdr queue and eager buffers. */ ret = hfi1_create_rcvhdrq(dd, uctxt); if (ret) goto done; ret = hfi1_setup_eagerbufs(uctxt); if (ret) goto done; /* If sub-contexts are enabled, do the appropriate setup */ if (uctxt->subctxt_cnt) ret = setup_subctxt(uctxt); if (ret) goto done; ret = hfi1_alloc_ctxt_rcv_groups(uctxt); if (ret) goto done; ret = init_user_ctxt(fd, uctxt); if (ret) { hfi1_free_ctxt_rcv_groups(uctxt); goto done; } user_init(uctxt); /* Now that the context is set up, the fd can get a reference. */ fd->uctxt = uctxt; hfi1_rcd_get(uctxt); done: if (uctxt->subctxt_cnt) { /* * On error, set the failed bit so sub-contexts will clean up * correctly. */ if (ret) set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); /* * Base context is done (successfully or not), notify anybody * using a sub-context that is waiting for this completion. */ clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); wake_up(&uctxt->wait); } return ret; } static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len) { struct hfi1_base_info binfo; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; unsigned offset; trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt); if (sizeof(binfo) != len) return -EINVAL; memset(&binfo, 0, sizeof(binfo)); binfo.hw_version = dd->revision; binfo.sw_version = HFI1_USER_SWVERSION; binfo.bthqp = RVT_KDETH_QP_PREFIX; binfo.jkey = uctxt->jkey; /* * If more than 64 contexts are enabled the allocated credit * return will span two or three contiguous pages. 
Since we only * map the page containing the context's credit return address, * we need to calculate the offset in the proper page. */ offset = ((u64)uctxt->sc->hw_free - (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE; binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt, fd->subctxt, offset); binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt, fd->subctxt, uctxt->sc->base_addr); binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP, uctxt->ctxt, fd->subctxt, uctxt->sc->base_addr); binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt, fd->subctxt, uctxt->rcvhdrq); binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt, fd->subctxt, uctxt->egrbufs.rcvtids[0].dma); binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt, fd->subctxt, 0); /* * user regs are at * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE)) */ binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt, fd->subctxt, 0); offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) * sizeof(*dd->events)); binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt, fd->subctxt, offset); binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt, fd->subctxt, dd->status); if (HFI1_CAP_IS_USET(DMA_RTAIL)) binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt, fd->subctxt, 0); if (uctxt->subctxt_cnt) { binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS, uctxt->ctxt, fd->subctxt, 0); binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ, uctxt->ctxt, fd->subctxt, 0); binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF, uctxt->ctxt, fd->subctxt, 0); } if (copy_to_user((void __user *)arg, &binfo, len)) return -EFAULT; return 0; } /** * user_exp_rcv_setup - Set up the given tid rcv list * @fd: file data of the current driver instance * @arg: ioctl argumnent for user space information * @len: length of data structure associated with ioctl command * * Wrapper to validate ioctl information before doing _rcv_setup. * */ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg, u32 len) { int ret; unsigned long addr; struct hfi1_tid_info tinfo; if (sizeof(tinfo) != len) return -EINVAL; if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo)))) return -EFAULT; ret = hfi1_user_exp_rcv_setup(fd, &tinfo); if (!ret) { /* * Copy the number of tidlist entries we used * and the length of the buffer we registered. */ addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) ret = -EFAULT; addr = arg + offsetof(struct hfi1_tid_info, length); if (!ret && copy_to_user((void __user *)addr, &tinfo.length, sizeof(tinfo.length))) ret = -EFAULT; if (ret) hfi1_user_exp_rcv_invalid(fd, &tinfo); } return ret; } /** * user_exp_rcv_clear - Clear the given tid rcv list * @fd: file data of the current driver instance * @arg: ioctl argumnent for user space information * @len: length of data structure associated with ioctl command * * The hfi1_user_exp_rcv_clear() can be called from the error path. Because * of this, we need to use this wrapper to copy the user space information * before doing the clear. 
*/ static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg, u32 len) { int ret; unsigned long addr; struct hfi1_tid_info tinfo; if (sizeof(tinfo) != len) return -EINVAL; if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo)))) return -EFAULT; ret = hfi1_user_exp_rcv_clear(fd, &tinfo); if (!ret) { addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) return -EFAULT; } return ret; } /** * user_exp_rcv_invalid - Invalidate the given tid rcv list * @fd: file data of the current driver instance * @arg: ioctl argumnent for user space information * @len: length of data structure associated with ioctl command * * Wrapper to validate ioctl information before doing _rcv_invalid. * */ static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg, u32 len) { int ret; unsigned long addr; struct hfi1_tid_info tinfo; if (sizeof(tinfo) != len) return -EINVAL; if (!fd->invalid_tids) return -EINVAL; if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo)))) return -EFAULT; ret = hfi1_user_exp_rcv_invalid(fd, &tinfo); if (ret) return ret; addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) ret = -EFAULT; return ret; } static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt) { struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; __poll_t pollflag; poll_wait(fp, &uctxt->wait, pt); spin_lock_irq(&dd->uctxt_lock); if (uctxt->urgent != uctxt->urgent_poll) { pollflag = EPOLLIN | EPOLLRDNORM; uctxt->urgent_poll = uctxt->urgent; } else { pollflag = 0; set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags); } spin_unlock_irq(&dd->uctxt_lock); return pollflag; } static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt) { struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; __poll_t pollflag; poll_wait(fp, &uctxt->wait, pt); spin_lock_irq(&dd->uctxt_lock); if (hdrqempty(uctxt)) { set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags); hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt); pollflag = 0; } else { pollflag = EPOLLIN | EPOLLRDNORM; } spin_unlock_irq(&dd->uctxt_lock); return pollflag; } /* * Find all user contexts in use, and set the specified bit in their * event mask. * See also find_ctxt() for a similar use, that is specific to send buffers. */ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit) { struct hfi1_ctxtdata *uctxt; struct hfi1_devdata *dd = ppd->dd; u16 ctxt; if (!dd->events) return -EINVAL; for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts; ctxt++) { uctxt = hfi1_rcd_get_by_index(dd, ctxt); if (uctxt) { unsigned long *evs; int i; /* * subctxt_cnt is 0 if not shared, so do base * separately, first, then remaining subctxt, if any */ evs = dd->events + uctxt_offset(uctxt); set_bit(evtbit, evs); for (i = 1; i < uctxt->subctxt_cnt; i++) set_bit(evtbit, evs + i); hfi1_rcd_put(uctxt); } } return 0; } /** * manage_rcvq - manage a context's receive queue * @uctxt: the context * @subctxt: the sub-context * @arg: start/stop action to carry out * * start_stop == 0 disables receive on the context, for use in queue * overflow conditions. 
start_stop==1 re-enables, to be used to * re-init the software copy of the head register */ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt, unsigned long arg) { struct hfi1_devdata *dd = uctxt->dd; unsigned int rcvctrl_op; int start_stop; if (subctxt) return 0; if (get_user(start_stop, (int __user *)arg)) return -EFAULT; /* atomically clear receive enable ctxt. */ if (start_stop) { /* * On enable, force in-memory copy of the tail register to * 0, so that protocol code doesn't have to worry about * whether or not the chip has yet updated the in-memory * copy or not on return from the system call. The chip * always resets it's tail register back to 0 on a * transition from disabled to enabled. */ if (hfi1_rcvhdrtail_kvaddr(uctxt)) clear_rcvhdrtail(uctxt); rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB; } else { rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS; } hfi1_rcvctrl(dd, rcvctrl_op, uctxt); /* always; new head should be equal to new tail; see above */ return 0; } /* * clear the event notifier events for this context. * User process then performs actions appropriate to bit having been * set, if desired, and checks again in future. */ static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt, unsigned long arg) { int i; struct hfi1_devdata *dd = uctxt->dd; unsigned long *evs; unsigned long events; if (!dd->events) return 0; if (get_user(events, (unsigned long __user *)arg)) return -EFAULT; evs = dd->events + uctxt_offset(uctxt) + subctxt; for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) { if (!test_bit(i, &events)) continue; clear_bit(i, evs); } return 0; } static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg) { int i; struct hfi1_pportdata *ppd = uctxt->ppd; struct hfi1_devdata *dd = uctxt->dd; u16 pkey; if (!HFI1_CAP_IS_USET(PKEY_CHECK)) return -EPERM; if (get_user(pkey, (u16 __user *)arg)) return -EFAULT; if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) return -EINVAL; for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) if (pkey == ppd->pkeys[i]) return hfi1_set_ctxt_pkey(dd, uctxt, pkey); return -ENOENT; } /** * ctxt_reset - Reset the user context * @uctxt: valid user context */ static int ctxt_reset(struct hfi1_ctxtdata *uctxt) { struct send_context *sc; struct hfi1_devdata *dd; int ret = 0; if (!uctxt || !uctxt->dd || !uctxt->sc) return -EINVAL; /* * There is no protection here. User level has to guarantee that * no one will be writing to the send context while it is being * re-initialized. If user level breaks that guarantee, it will * break it's own context and no one else's. */ dd = uctxt->dd; sc = uctxt->sc; /* * Wait until the interrupt handler has marked the context as * halted or frozen. Report error if we time out. */ wait_event_interruptible_timeout( sc->halt_wait, (sc->flags & SCF_HALTED), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); if (!(sc->flags & SCF_HALTED)) return -ENOLCK; /* * If the send context was halted due to a Freeze, wait until the * device has been "unfrozen" before resetting the context. 
*/ if (sc->flags & SCF_FROZEN) { wait_event_interruptible_timeout( dd->event_queue, !(READ_ONCE(dd->flags) & HFI1_FROZEN), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); if (dd->flags & HFI1_FROZEN) return -ENOLCK; if (dd->flags & HFI1_FORCED_FREEZE) /* * Don't allow context reset if we are into * forced freeze */ return -ENODEV; sc_disable(sc); ret = sc_enable(sc); hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt); } else { ret = sc_restart(sc); } if (!ret) sc_return_credits(sc); return ret; } static void user_remove(struct hfi1_devdata *dd) { hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device); } static int user_add(struct hfi1_devdata *dd) { char name[10]; int ret; snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops, &dd->user_cdev, &dd->user_device, true, &dd->verbs_dev.rdi.ibdev.dev.kobj); if (ret) user_remove(dd); return ret; } /* * Create per-unit files in /dev */ int hfi1_device_create(struct hfi1_devdata *dd) { return user_add(dd); } /* * Remove per-unit files in /dev * void, core kernel returns no errors for this stuff */ void hfi1_device_remove(struct hfi1_devdata *dd) { user_remove(dd); }
linux-master
drivers/infiniband/hw/hfi1/file_ops.c
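The HFI1_MMAP_TOKEN macros at the top of file_ops.c pack a magic value, a buffer type, the context and sub-context numbers, and a page offset into the 64-bit cookie that user space hands back through vm_pgoff; hfi1_file_mmap() then validates the magic and unpacks the rest. The stand-alone sketch below mirrors that packing. Only the MAGIC, TYPE and CTXT shifts are visible in the excerpt above, so the OFFSET/SUBCTXT widths and the example field values used here are illustrative assumptions, not the driver's definitions.

/* Stand-alone sketch of the mmap-token packing checked by hfi1_file_mmap().
 * SUBCTXT/OFFSET widths below are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define MAGIC         0xdabbad00ULL
#define MAGIC_SHIFT   32
#define TYPE_SHIFT    24	/* 4-bit buffer type (PIO_BUFS, RCV_HDRQ, ...) */
#define CTXT_SHIFT    16	/* context number; 8-bit width assumed */
#define SUBCTXT_SHIFT 12	/* sub-context; 4-bit width assumed */
				/* bits 11..0: offset within the page (assumed) */

static uint64_t make_token(uint8_t type, uint8_t ctxt, uint8_t subctxt,
			   uint16_t page_off)
{
	return (MAGIC << MAGIC_SHIFT) |
	       ((uint64_t)(type & 0xf) << TYPE_SHIFT) |
	       ((uint64_t)ctxt << CTXT_SHIFT) |
	       ((uint64_t)(subctxt & 0xf) << SUBCTXT_SHIFT) |
	       (page_off & 0xfff);
}

int main(void)
{
	uint64_t tok = make_token(3 /* type */, 7 /* ctxt */, 1 /* subctxt */, 0x40);

	/* The driver validates the magic before decoding anything else. */
	printf("magic ok: %d\n", (uint32_t)(tok >> MAGIC_SHIFT) == MAGIC);
	printf("type=%u ctxt=%u subctxt=%u off=0x%x\n",
	       (unsigned)((tok >> TYPE_SHIFT) & 0xf),
	       (unsigned)((tok >> CTXT_SHIFT) & 0xff),
	       (unsigned)((tok >> SUBCTXT_SHIFT) & 0xf),
	       (unsigned)(tok & 0xfff));
	return 0;
}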
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright(c) 2019 Intel Corporation. * */ #include "aspm.h" /* Time after which the timer interrupt will re-enable ASPM */ #define ASPM_TIMER_MS 1000 /* Time for which interrupts are ignored after a timer has been scheduled */ #define ASPM_RESCHED_TIMER_MS (ASPM_TIMER_MS / 2) /* Two interrupts within this time trigger ASPM disable */ #define ASPM_TRIGGER_MS 1 #define ASPM_TRIGGER_NS (ASPM_TRIGGER_MS * 1000 * 1000ull) #define ASPM_L1_SUPPORTED(reg) \ ((((reg) & PCI_EXP_LNKCAP_ASPMS) >> 10) & 0x2) uint aspm_mode = ASPM_MODE_DISABLED; module_param_named(aspm, aspm_mode, uint, 0444); MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic"); static bool aspm_hw_l1_supported(struct hfi1_devdata *dd) { struct pci_dev *parent = dd->pcidev->bus->self; u32 up, dn; /* * If the driver does not have access to the upstream component, * it cannot support ASPM L1 at all. */ if (!parent) return false; pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &dn); dn = ASPM_L1_SUPPORTED(dn); pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &up); up = ASPM_L1_SUPPORTED(up); /* ASPM works on A-step but is reported as not supported */ return (!!dn || is_ax(dd)) && !!up; } /* Set L1 entrance latency for slower entry to L1 */ static void aspm_hw_set_l1_ent_latency(struct hfi1_devdata *dd) { u32 l1_ent_lat = 0x4u; u32 reg32; pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, &reg32); reg32 &= ~PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SMASK; reg32 |= l1_ent_lat << PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SHIFT; pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, reg32); } static void aspm_hw_enable_l1(struct hfi1_devdata *dd) { struct pci_dev *parent = dd->pcidev->bus->self; /* * If the driver does not have access to the upstream component, * it cannot support ASPM L1 at all. 
*/ if (!parent) return; /* Enable ASPM L1 first in upstream component and then downstream */ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC, PCI_EXP_LNKCTL_ASPM_L1); pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC, PCI_EXP_LNKCTL_ASPM_L1); } void aspm_hw_disable_l1(struct hfi1_devdata *dd) { struct pci_dev *parent = dd->pcidev->bus->self; /* Disable ASPM L1 first in downstream component and then upstream */ pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC, 0x0); if (parent) pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC, 0x0); } static void aspm_enable(struct hfi1_devdata *dd) { if (dd->aspm_enabled || aspm_mode == ASPM_MODE_DISABLED || !dd->aspm_supported) return; aspm_hw_enable_l1(dd); dd->aspm_enabled = true; } static void aspm_disable(struct hfi1_devdata *dd) { if (!dd->aspm_enabled || aspm_mode == ASPM_MODE_ENABLED) return; aspm_hw_disable_l1(dd); dd->aspm_enabled = false; } static void aspm_disable_inc(struct hfi1_devdata *dd) { unsigned long flags; spin_lock_irqsave(&dd->aspm_lock, flags); aspm_disable(dd); atomic_inc(&dd->aspm_disabled_cnt); spin_unlock_irqrestore(&dd->aspm_lock, flags); } static void aspm_enable_dec(struct hfi1_devdata *dd) { unsigned long flags; spin_lock_irqsave(&dd->aspm_lock, flags); if (atomic_dec_and_test(&dd->aspm_disabled_cnt)) aspm_enable(dd); spin_unlock_irqrestore(&dd->aspm_lock, flags); } /* ASPM processing for each receive context interrupt */ void __aspm_ctx_disable(struct hfi1_ctxtdata *rcd) { bool restart_timer; bool close_interrupts; unsigned long flags; ktime_t now, prev; spin_lock_irqsave(&rcd->aspm_lock, flags); /* PSM contexts are open */ if (!rcd->aspm_intr_enable) goto unlock; prev = rcd->aspm_ts_last_intr; now = ktime_get(); rcd->aspm_ts_last_intr = now; /* An interrupt pair close together in time */ close_interrupts = ktime_to_ns(ktime_sub(now, prev)) < ASPM_TRIGGER_NS; /* Don't push out our timer till this much time has elapsed */ restart_timer = ktime_to_ns(ktime_sub(now, rcd->aspm_ts_timer_sched)) > ASPM_RESCHED_TIMER_MS * NSEC_PER_MSEC; restart_timer = restart_timer && close_interrupts; /* Disable ASPM and schedule timer */ if (rcd->aspm_enabled && close_interrupts) { aspm_disable_inc(rcd->dd); rcd->aspm_enabled = false; restart_timer = true; } if (restart_timer) { mod_timer(&rcd->aspm_timer, jiffies + msecs_to_jiffies(ASPM_TIMER_MS)); rcd->aspm_ts_timer_sched = now; } unlock: spin_unlock_irqrestore(&rcd->aspm_lock, flags); } /* Timer function for re-enabling ASPM in the absence of interrupt activity */ static void aspm_ctx_timer_function(struct timer_list *t) { struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer); unsigned long flags; spin_lock_irqsave(&rcd->aspm_lock, flags); aspm_enable_dec(rcd->dd); rcd->aspm_enabled = true; spin_unlock_irqrestore(&rcd->aspm_lock, flags); } /* * Disable interrupt processing for verbs contexts when PSM or VNIC contexts * are open. 
*/ void aspm_disable_all(struct hfi1_devdata *dd) { struct hfi1_ctxtdata *rcd; unsigned long flags; u16 i; for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) { rcd = hfi1_rcd_get_by_index(dd, i); if (rcd) { del_timer_sync(&rcd->aspm_timer); spin_lock_irqsave(&rcd->aspm_lock, flags); rcd->aspm_intr_enable = false; spin_unlock_irqrestore(&rcd->aspm_lock, flags); hfi1_rcd_put(rcd); } } aspm_disable(dd); atomic_set(&dd->aspm_disabled_cnt, 0); } /* Re-enable interrupt processing for verbs contexts */ void aspm_enable_all(struct hfi1_devdata *dd) { struct hfi1_ctxtdata *rcd; unsigned long flags; u16 i; aspm_enable(dd); if (aspm_mode != ASPM_MODE_DYNAMIC) return; for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) { rcd = hfi1_rcd_get_by_index(dd, i); if (rcd) { spin_lock_irqsave(&rcd->aspm_lock, flags); rcd->aspm_intr_enable = true; rcd->aspm_enabled = true; spin_unlock_irqrestore(&rcd->aspm_lock, flags); hfi1_rcd_put(rcd); } } } static void aspm_ctx_init(struct hfi1_ctxtdata *rcd) { spin_lock_init(&rcd->aspm_lock); timer_setup(&rcd->aspm_timer, aspm_ctx_timer_function, 0); rcd->aspm_intr_supported = rcd->dd->aspm_supported && aspm_mode == ASPM_MODE_DYNAMIC && rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt; } void aspm_init(struct hfi1_devdata *dd) { struct hfi1_ctxtdata *rcd; u16 i; spin_lock_init(&dd->aspm_lock); dd->aspm_supported = aspm_hw_l1_supported(dd); for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) { rcd = hfi1_rcd_get_by_index(dd, i); if (rcd) aspm_ctx_init(rcd); hfi1_rcd_put(rcd); } /* Start with ASPM disabled */ aspm_hw_set_l1_ent_latency(dd); dd->aspm_enabled = false; aspm_hw_disable_l1(dd); /* Now turn on ASPM if configured */ aspm_enable_all(dd); } void aspm_exit(struct hfi1_devdata *dd) { aspm_disable_all(dd); /* Turn on ASPM on exit to conserve power */ aspm_enable(dd); }
linux-master
drivers/infiniband/hw/hfi1/aspm.c
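__aspm_ctx_disable() above is a small burst detector: two receive interrupts arriving within ASPM_TRIGGER_MS of each other disable L1 via aspm_disable_inc() and arm a per-context timer that re-enables it after ASPM_TIMER_MS of quiet, while ASPM_RESCHED_TIMER_MS keeps the timer from being pushed out on every interrupt. Below is a user-space sketch of just that decision logic, with the ktime/timer plumbing replaced by plain nanosecond counters; the struct and helper names are invented for the model.

/* User-space model of the per-context heuristic in __aspm_ctx_disable().
 * Timestamps are plain nanoseconds; the kernel timer is modelled only by
 * remembering when it was last armed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRIGGER_NS  (1ULL * 1000 * 1000)	/* ASPM_TRIGGER_MS = 1 */
#define TIMER_NS    (1000ULL * 1000 * 1000)	/* ASPM_TIMER_MS = 1000 */
#define RESCHED_NS  (TIMER_NS / 2)		/* ASPM_RESCHED_TIMER_MS */

struct ctx_model {
	uint64_t last_intr_ns;
	uint64_t timer_sched_ns;
	bool aspm_enabled;
};

/* Returns true if the (modelled) re-enable timer should be (re)armed. */
static bool rx_interrupt(struct ctx_model *c, uint64_t now_ns)
{
	bool close = (now_ns - c->last_intr_ns) < TRIGGER_NS;
	bool rearm = close && (now_ns - c->timer_sched_ns) > RESCHED_NS;

	c->last_intr_ns = now_ns;

	if (c->aspm_enabled && close) {
		c->aspm_enabled = false;	/* aspm_disable_inc() in the driver */
		rearm = true;
	}
	if (rearm)
		c->timer_sched_ns = now_ns;	/* mod_timer(now + TIMER_NS) */
	return rearm;
}

int main(void)
{
	struct ctx_model c = { .aspm_enabled = true };

	rx_interrupt(&c, 10 * TRIGGER_NS);	/* isolated interrupt: no change */
	bool armed = rx_interrupt(&c, 10 * TRIGGER_NS + TRIGGER_NS / 2);
	printf("aspm_enabled=%d timer_armed=%d\n", c.aspm_enabled, armed);
	return 0;
}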
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ #include <linux/delay.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include "hfi.h" /* for the given bus number, return the CSR for reading an i2c line */ static inline u32 i2c_in_csr(u32 bus_num) { return bus_num ? ASIC_QSFP2_IN : ASIC_QSFP1_IN; } /* for the given bus number, return the CSR for writing an i2c line */ static inline u32 i2c_oe_csr(u32 bus_num) { return bus_num ? ASIC_QSFP2_OE : ASIC_QSFP1_OE; } static void hfi1_setsda(void *data, int state) { struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data; struct hfi1_devdata *dd = bus->controlling_dd; u64 reg; u32 target_oe; target_oe = i2c_oe_csr(bus->num); reg = read_csr(dd, target_oe); /* * The OE bit value is inverted and connected to the pin. When * OE is 0 the pin is left to be pulled up, when the OE is 1 * the pin is driven low. This matches the "open drain" or "open * collector" convention. */ if (state) reg &= ~QSFP_HFI0_I2CDAT; else reg |= QSFP_HFI0_I2CDAT; write_csr(dd, target_oe, reg); /* do a read to force the write into the chip */ (void)read_csr(dd, target_oe); } static void hfi1_setscl(void *data, int state) { struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data; struct hfi1_devdata *dd = bus->controlling_dd; u64 reg; u32 target_oe; target_oe = i2c_oe_csr(bus->num); reg = read_csr(dd, target_oe); /* * The OE bit value is inverted and connected to the pin. When * OE is 0 the pin is left to be pulled up, when the OE is 1 * the pin is driven low. This matches the "open drain" or "open * collector" convention. */ if (state) reg &= ~QSFP_HFI0_I2CCLK; else reg |= QSFP_HFI0_I2CCLK; write_csr(dd, target_oe, reg); /* do a read to force the write into the chip */ (void)read_csr(dd, target_oe); } static int hfi1_getsda(void *data) { struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data; u64 reg; u32 target_in; hfi1_setsda(data, 1); /* clear OE so we do not pull line down */ udelay(2); /* 1us pull up + 250ns hold */ target_in = i2c_in_csr(bus->num); reg = read_csr(bus->controlling_dd, target_in); return !!(reg & QSFP_HFI0_I2CDAT); } static int hfi1_getscl(void *data) { struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data; u64 reg; u32 target_in; hfi1_setscl(data, 1); /* clear OE so we do not pull line down */ udelay(2); /* 1us pull up + 250ns hold */ target_in = i2c_in_csr(bus->num); reg = read_csr(bus->controlling_dd, target_in); return !!(reg & QSFP_HFI0_I2CCLK); } /* * Allocate and initialize the given i2c bus number. * Returns NULL on failure. */ static struct hfi1_i2c_bus *init_i2c_bus(struct hfi1_devdata *dd, struct hfi1_asic_data *ad, int num) { struct hfi1_i2c_bus *bus; int ret; bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) return NULL; bus->controlling_dd = dd; bus->num = num; /* our bus number */ bus->algo.setsda = hfi1_setsda; bus->algo.setscl = hfi1_setscl; bus->algo.getsda = hfi1_getsda; bus->algo.getscl = hfi1_getscl; bus->algo.udelay = 5; bus->algo.timeout = usecs_to_jiffies(100000); bus->algo.data = bus; bus->adapter.owner = THIS_MODULE; bus->adapter.algo_data = &bus->algo; bus->adapter.dev.parent = &dd->pcidev->dev; snprintf(bus->adapter.name, sizeof(bus->adapter.name), "hfi1_i2c%d", num); ret = i2c_bit_add_bus(&bus->adapter); if (ret) { dd_dev_info(dd, "%s: unable to add i2c bus %d, err %d\n", __func__, num, ret); kfree(bus); return NULL; } return bus; } /* * Initialize i2c buses. * Return 0 on success, -errno on error. 
*/ int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad) { ad->i2c_bus0 = init_i2c_bus(dd, ad, 0); ad->i2c_bus1 = init_i2c_bus(dd, ad, 1); if (!ad->i2c_bus0 || !ad->i2c_bus1) return -ENOMEM; return 0; }; static void clean_i2c_bus(struct hfi1_i2c_bus *bus) { if (bus) { i2c_del_adapter(&bus->adapter); kfree(bus); } } void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad) { if (!ad) return; clean_i2c_bus(ad->i2c_bus0); ad->i2c_bus0 = NULL; clean_i2c_bus(ad->i2c_bus1); ad->i2c_bus1 = NULL; } static int i2c_bus_write(struct hfi1_devdata *dd, struct hfi1_i2c_bus *i2c, u8 slave_addr, int offset, int offset_size, u8 *data, u16 len) { int ret; int num_msgs; u8 offset_bytes[2]; struct i2c_msg msgs[2]; switch (offset_size) { case 0: num_msgs = 1; msgs[0].addr = slave_addr; msgs[0].flags = 0; msgs[0].len = len; msgs[0].buf = data; break; case 2: offset_bytes[1] = (offset >> 8) & 0xff; fallthrough; case 1: num_msgs = 2; offset_bytes[0] = offset & 0xff; msgs[0].addr = slave_addr; msgs[0].flags = 0; msgs[0].len = offset_size; msgs[0].buf = offset_bytes; msgs[1].addr = slave_addr; msgs[1].flags = I2C_M_NOSTART; msgs[1].len = len; msgs[1].buf = data; break; default: return -EINVAL; } i2c->controlling_dd = dd; ret = i2c_transfer(&i2c->adapter, msgs, num_msgs); if (ret != num_msgs) { dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; write failed, ret %d\n", __func__, i2c->num, slave_addr, offset, len, ret); return ret < 0 ? ret : -EIO; } return 0; } static int i2c_bus_read(struct hfi1_devdata *dd, struct hfi1_i2c_bus *bus, u8 slave_addr, int offset, int offset_size, u8 *data, u16 len) { int ret; int num_msgs; u8 offset_bytes[2]; struct i2c_msg msgs[2]; switch (offset_size) { case 0: num_msgs = 1; msgs[0].addr = slave_addr; msgs[0].flags = I2C_M_RD; msgs[0].len = len; msgs[0].buf = data; break; case 2: offset_bytes[1] = (offset >> 8) & 0xff; fallthrough; case 1: num_msgs = 2; offset_bytes[0] = offset & 0xff; msgs[0].addr = slave_addr; msgs[0].flags = 0; msgs[0].len = offset_size; msgs[0].buf = offset_bytes; msgs[1].addr = slave_addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = data; break; default: return -EINVAL; } bus->controlling_dd = dd; ret = i2c_transfer(&bus->adapter, msgs, num_msgs); if (ret != num_msgs) { dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; read failed, ret %d\n", __func__, bus->num, slave_addr, offset, len, ret); return ret < 0 ? ret : -EIO; } return 0; } /* * Raw i2c write. No set-up or lock checking. * * Return 0 on success, -errno on error. */ static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) { struct hfi1_devdata *dd = ppd->dd; struct hfi1_i2c_bus *bus; u8 slave_addr; int offset_size; bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0; slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */ offset_size = (i2c_addr >> 8) & 0x3; return i2c_bus_write(dd, bus, slave_addr, offset, offset_size, bp, len); } /* * Caller must hold the i2c chain resource. * * Return number of bytes written, or -errno. */ int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) { int ret; if (!check_chip_resource(ppd->dd, i2c_target(target), __func__)) return -EACCES; ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len); if (ret) return ret; return len; } /* * Raw i2c read. No set-up or lock checking. * * Return 0 on success, -errno on error. 
*/ static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) { struct hfi1_devdata *dd = ppd->dd; struct hfi1_i2c_bus *bus; u8 slave_addr; int offset_size; bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0; slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */ offset_size = (i2c_addr >> 8) & 0x3; return i2c_bus_read(dd, bus, slave_addr, offset, offset_size, bp, len); } /* * Caller must hold the i2c chain resource. * * Return number of bytes read, or -errno. */ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) { int ret; if (!check_chip_resource(ppd->dd, i2c_target(target), __func__)) return -EACCES; ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len); if (ret) return ret; return len; } /* * Write page n, offset m of QSFP memory as defined by SFF 8636 * by writing @addr = ((256 * n) + m) * * Caller must hold the i2c chain resource. * * Return number of bytes written or -errno. */ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) { int count = 0; int offset; int nwrite; int ret = 0; u8 page; if (!check_chip_resource(ppd->dd, i2c_target(target), __func__)) return -EACCES; while (count < len) { /* * Set the qsfp page based on a zero-based address * and a page size of QSFP_PAGESIZE bytes. */ page = (u8)(addr / QSFP_PAGESIZE); ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1); /* QSFPs require a 5-10msec delay after write operations */ mdelay(5); if (ret) { hfi1_dev_porterr(ppd->dd, ppd->port, "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n", target, ret); break; } offset = addr % QSFP_PAGESIZE; nwrite = len - count; /* truncate write to boundary if crossing boundary */ if (((addr % QSFP_RW_BOUNDARY) + nwrite) > QSFP_RW_BOUNDARY) nwrite = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY); ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, offset, bp + count, nwrite); /* QSFPs require a 5-10msec delay after write operations */ mdelay(5); if (ret) /* stop on error */ break; count += nwrite; addr += nwrite; } if (ret < 0) return ret; return count; } /* * Perform a stand-alone single QSFP write. Acquire the resource, do the * write, then release the resource. */ int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) { struct hfi1_devdata *dd = ppd->dd; u32 resource = qsfp_resource(dd); int ret; ret = acquire_chip_resource(dd, resource, QSFP_WAIT); if (ret) return ret; ret = qsfp_write(ppd, target, addr, bp, len); release_chip_resource(dd, resource); return ret; } /* * Access page n, offset m of QSFP memory as defined by SFF 8636 * by reading @addr = ((256 * n) + m) * * Caller must hold the i2c chain resource. * * Return the number of bytes read or -errno. */ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) { int count = 0; int offset; int nread; int ret = 0; u8 page; if (!check_chip_resource(ppd->dd, i2c_target(target), __func__)) return -EACCES; while (count < len) { /* * Set the qsfp page based on a zero-based address * and a page size of QSFP_PAGESIZE bytes. 
*/ page = (u8)(addr / QSFP_PAGESIZE); ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1); /* QSFPs require a 5-10msec delay after write operations */ mdelay(5); if (ret) { hfi1_dev_porterr(ppd->dd, ppd->port, "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n", target, ret); break; } offset = addr % QSFP_PAGESIZE; nread = len - count; /* truncate read to boundary if crossing boundary */ if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY) nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY); ret = __i2c_read(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, offset, bp + count, nread); if (ret) /* stop on error */ break; count += nread; addr += nread; } if (ret < 0) return ret; return count; } /* * Perform a stand-alone single QSFP read. Acquire the resource, do the * read, then release the resource. */ int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) { struct hfi1_devdata *dd = ppd->dd; u32 resource = qsfp_resource(dd); int ret; ret = acquire_chip_resource(dd, resource, QSFP_WAIT); if (ret) return ret; ret = qsfp_read(ppd, target, addr, bp, len); release_chip_resource(dd, resource); return ret; } /* * This function caches the QSFP memory range in 128 byte chunks. * As an example, the next byte after address 255 is byte 128 from * upper page 01H (if existing) rather than byte 0 from lower page 00H. * Access page n, offset m of QSFP memory as defined by SFF 8636 * in the cache by reading byte ((128 * n) + m) * The calls to qsfp_{read,write} in this function correctly handle the * address map difference between this mapping and the mapping implemented * by those functions * * The caller must be holding the QSFP i2c chain resource. */ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) { u32 target = ppd->dd->hfi1_id; int ret; unsigned long flags; u8 *cache = &cp->cache[0]; /* ensure sane contents on invalid reads, for cable swaps */ memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128)); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.cache_valid = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); if (!qsfp_mod_present(ppd)) { ret = -ENODEV; goto bail; } ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE); if (ret != QSFP_PAGESIZE) { dd_dev_info(ppd->dd, "%s: Page 0 read failed, expected %d, got %d\n", __func__, QSFP_PAGESIZE, ret); goto bail; } /* Is paging enabled? 
*/ if (!(cache[2] & 4)) { /* Paging enabled, page 03 required */ if ((cache[195] & 0xC0) == 0xC0) { /* all */ ret = qsfp_read(ppd, target, 384, cache + 256, 128); if (ret <= 0 || ret != 128) { dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } ret = qsfp_read(ppd, target, 640, cache + 384, 128); if (ret <= 0 || ret != 128) { dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } ret = qsfp_read(ppd, target, 896, cache + 512, 128); if (ret <= 0 || ret != 128) { dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } } else if ((cache[195] & 0x80) == 0x80) { /* only page 2 and 3 */ ret = qsfp_read(ppd, target, 640, cache + 384, 128); if (ret <= 0 || ret != 128) { dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } ret = qsfp_read(ppd, target, 896, cache + 512, 128); if (ret <= 0 || ret != 128) { dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } } else if ((cache[195] & 0x40) == 0x40) { /* only page 1 and 3 */ ret = qsfp_read(ppd, target, 384, cache + 256, 128); if (ret <= 0 || ret != 128) { dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } ret = qsfp_read(ppd, target, 896, cache + 512, 128); if (ret <= 0 || ret != 128) { dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } } else { /* only page 3 */ ret = qsfp_read(ppd, target, 896, cache + 512, 128); if (ret <= 0 || ret != 128) { dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } } } spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.cache_valid = 1; ppd->qsfp_info.cache_refresh_required = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); return 0; bail: memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128)); return ret; } const char * const hfi1_qsfp_devtech[16] = { "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP", "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML", "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq", "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq" }; #define QSFP_DUMP_CHUNK 16 /* Holds longest string */ #define QSFP_DEFAULT_HDR_CNT 224 #define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3) #define QSFP_HIGH_PWR(pbyte) ((pbyte) & 3) /* For use with QSFP_HIGH_PWR macro */ #define QSFP_HIGH_PWR_UNUSED 0 /* Bits [1:0] = 00 implies low power module */ /* * Takes power class byte [Page 00 Byte 129] in SFF 8636 * Returns power class as integer (1 through 7, per SFF 8636 rev 2.4) */ int get_qsfp_power_class(u8 power_byte) { if (QSFP_HIGH_PWR(power_byte) == QSFP_HIGH_PWR_UNUSED) /* power classes count from 1, their bit encodings from 0 */ return (QSFP_PWR(power_byte) + 1); /* * 00 in the high power classes stands for unused, bringing * balance to the off-by-1 offset above, we add 4 here to * account for the difference between the low and high power * groups */ return (QSFP_HIGH_PWR(power_byte) + 4); } int qsfp_mod_present(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 reg; reg = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN); return !(reg & QSFP_HFI0_MODPRST_N); } /* * This function maps QSFP memory addresses in 128 byte chunks in the following * fashion per the CableInfo SMA query definition in the IBA 1.3 spec/OPA Gen 1 * spec * For addr 000-127, lower page 00h * For addr 128-255, upper page 00h * For addr 256-383, upper page 01h * For addr 384-511, upper page 02h * For addr 512-639, upper page 03h * * For addresses beyond this range, it returns the invalid range of data buffer * set to 0. * For upper pages that are optional, if they are not valid, returns the * particular range of bytes in the data buffer set to 0. 
*/ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len, u8 *data) { struct hfi1_pportdata *ppd; u32 excess_len = len; int ret = 0, offset = 0; if (port_num > dd->num_pports || port_num < 1) { dd_dev_info(dd, "%s: Invalid port number %d\n", __func__, port_num); ret = -EINVAL; goto set_zeroes; } ppd = dd->pport + (port_num - 1); if (!qsfp_mod_present(ppd)) { ret = -ENODEV; goto set_zeroes; } if (!ppd->qsfp_info.cache_valid) { ret = -EINVAL; goto set_zeroes; } if (addr >= (QSFP_MAX_NUM_PAGES * 128)) { ret = -ERANGE; goto set_zeroes; } if ((addr + len) > (QSFP_MAX_NUM_PAGES * 128)) { excess_len = (addr + len) - (QSFP_MAX_NUM_PAGES * 128); memcpy(data, &ppd->qsfp_info.cache[addr], (len - excess_len)); data += (len - excess_len); goto set_zeroes; } memcpy(data, &ppd->qsfp_info.cache[addr], len); if (addr <= QSFP_MONITOR_VAL_END && (addr + len) >= QSFP_MONITOR_VAL_START) { /* Overlap with the dynamic channel monitor range */ if (addr < QSFP_MONITOR_VAL_START) { if (addr + len <= QSFP_MONITOR_VAL_END) len = addr + len - QSFP_MONITOR_VAL_START; else len = QSFP_MONITOR_RANGE; offset = QSFP_MONITOR_VAL_START - addr; addr = QSFP_MONITOR_VAL_START; } else if (addr == QSFP_MONITOR_VAL_START) { offset = 0; if (addr + len > QSFP_MONITOR_VAL_END) len = QSFP_MONITOR_RANGE; } else { offset = 0; if (addr + len > QSFP_MONITOR_VAL_END) len = QSFP_MONITOR_VAL_END - addr + 1; } /* Refresh the values of the dynamic monitors from the cable */ ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len); if (ret != len) { ret = -EAGAIN; goto set_zeroes; } } return 0; set_zeroes: memset(data, 0, excess_len); return ret; } static const char *pwr_codes[8] = {"N/AW", "1.5W", "2.0W", "2.5W", "3.5W", "4.0W", "4.5W", "5.0W" }; int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len) { u8 *cache = &ppd->qsfp_info.cache[0]; u8 bin_buff[QSFP_DUMP_CHUNK]; char lenstr[6]; int sofar; int bidx = 0; u8 *atten = &cache[QSFP_ATTEN_OFFS]; u8 *vendor_oui = &cache[QSFP_VOUI_OFFS]; u8 power_byte = 0; sofar = 0; lenstr[0] = ' '; lenstr[1] = '\0'; if (ppd->qsfp_info.cache_valid) { if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) snprintf(lenstr, sizeof(lenstr), "%dM ", cache[QSFP_MOD_LEN_OFFS]); power_byte = cache[QSFP_MOD_PWR_OFFS]; sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes[get_qsfp_power_class(power_byte)]); sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr, hfi1_qsfp_devtech[(cache[QSFP_MOD_TECH_OFFS]) >> 4]); sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n", QSFP_VEND_LEN, &cache[QSFP_VEND_OFFS]); sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n", QSFP_OUI(vendor_oui)); sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n", QSFP_PN_LEN, &cache[QSFP_PN_OFFS]); sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n", QSFP_REV_LEN, &cache[QSFP_REV_OFFS]); if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n", QSFP_ATTEN_SDR(atten), QSFP_ATTEN_DDR(atten)); sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n", QSFP_SN_LEN, &cache[QSFP_SN_OFFS]); sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n", QSFP_DATE_LEN, &cache[QSFP_DATE_OFFS]); sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n", QSFP_LOT_LEN, &cache[QSFP_LOT_OFFS]); while (bidx < QSFP_DEFAULT_HDR_CNT) { int iidx; memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK); for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) { sofar += scnprintf(buf + sofar, len - sofar, " %02X", bin_buff[iidx]); } sofar += scnprintf(buf + sofar, 
len - sofar, "\n"); bidx += QSFP_DUMP_CHUNK; } } return sofar; }
linux-master
drivers/infiniband/hw/hfi1/qsfp.c
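The get_cable_info() routine above copies from the cached QSFP pages and zero-fills whatever part of the request falls outside the cache. Below is a minimal userspace sketch of that clamp-and-zero-fill arithmetic; the function name, the -1 error return and the test values are hypothetical, and QSFP_MAX_NUM_PAGES * 128 is assumed to be 512 bytes. The real driver additionally refreshes the dynamic monitor range from the cable; the sketch only covers the cache copy.

#include <stdio.h>
#include <string.h>

#define CACHE_BYTES (4 * 128)   /* stand-in for QSFP_MAX_NUM_PAGES * 128 */

static int read_cable_cache(const unsigned char *cache, unsigned int addr,
                            unsigned int len, unsigned char *data)
{
	unsigned int excess = 0;

	if (addr >= CACHE_BYTES) {              /* entirely past the cache */
		memset(data, 0, len);
		return -1;
	}
	if (addr + len > CACHE_BYTES)
		excess = addr + len - CACHE_BYTES;  /* tail falls off the cache */

	memcpy(data, cache + addr, len - excess);
	memset(data + (len - excess), 0, excess);   /* zero the unreadable tail */
	return excess ? -1 : 0;
}

int main(void)
{
	unsigned char cache[CACHE_BYTES], out[32];
	unsigned int i;

	for (i = 0; i < CACHE_BYTES; i++)
		cache[i] = (unsigned char)i;

	/* read 32 bytes starting 16 bytes before the end of the cache */
	read_cable_cache(cache, CACHE_BYTES - 16, 32, out);
	for (i = 0; i < 32; i++)
		printf("%02x%c", out[i], i == 31 ? '\n' : ' ');
	return 0;
}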
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ #include <linux/string.h> #include <linux/string_helpers.h> #include "efivar.h" /* GUID for HFI1 variables in EFI */ #define HFI1_EFIVAR_GUID EFI_GUID(0xc50a953e, 0xa8b2, 0x42a6, \ 0xbf, 0x89, 0xd3, 0x33, 0xa6, 0xe9, 0xe6, 0xd4) /* largest EFI data size we expect */ #define EFI_DATA_SIZE 4096 /* * Read the named EFI variable. Return the size of the actual data in *size * and a kmalloc'ed buffer in *return_data. The caller must free the * data. It is guaranteed that *return_data will be NULL and *size = 0 * if this routine fails. * * Return 0 on success, -errno on failure. */ static int read_efi_var(const char *name, unsigned long *size, void **return_data) { efi_status_t status; efi_char16_t *uni_name; efi_guid_t guid; unsigned long temp_size; void *temp_buffer; void *data; int i; int ret; /* set failure return values */ *size = 0; *return_data = NULL; if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) return -EOPNOTSUPP; uni_name = kcalloc(strlen(name) + 1, sizeof(efi_char16_t), GFP_KERNEL); temp_buffer = kzalloc(EFI_DATA_SIZE, GFP_KERNEL); if (!uni_name || !temp_buffer) { ret = -ENOMEM; goto fail; } /* input: the size of the buffer */ temp_size = EFI_DATA_SIZE; /* convert ASCII to unicode - it is a 1:1 mapping */ for (i = 0; name[i]; i++) uni_name[i] = name[i]; /* need a variable for our GUID */ guid = HFI1_EFIVAR_GUID; /* call into EFI runtime services */ status = efi.get_variable( uni_name, &guid, NULL, &temp_size, temp_buffer); /* * It would be nice to call efi_status_to_err() here, but that * is in the EFIVAR_FS code and may not be compiled in. * However, even that is insufficient since it does not cover * EFI_BUFFER_TOO_SMALL which could be an important return. * For now, just split out success or not found. */ ret = status == EFI_SUCCESS ? 0 : status == EFI_NOT_FOUND ? -ENOENT : -EINVAL; if (ret) goto fail; /* * We have successfully read the EFI variable into our * temporary buffer. Now allocate a correctly sized * buffer. */ data = kmemdup(temp_buffer, temp_size, GFP_KERNEL); if (!data) { ret = -ENOMEM; goto fail; } *size = temp_size; *return_data = data; fail: kfree(uni_name); kfree(temp_buffer); return ret; } /* * Read an HFI1 EFI variable of the form: * <PCIe address>-<kind> * Return an kalloc'ed array and size of the data. * * Returns 0 on success, -errno on failure. */ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind, unsigned long *size, void **return_data) { char prefix_name[64]; char name[64]; int result; /* create a common prefix */ snprintf(prefix_name, sizeof(prefix_name), "%04x:%02x:%02x.%x", pci_domain_nr(dd->pcidev->bus), dd->pcidev->bus->number, PCI_SLOT(dd->pcidev->devfn), PCI_FUNC(dd->pcidev->devfn)); snprintf(name, sizeof(name), "%s-%s", prefix_name, kind); result = read_efi_var(name, size, return_data); /* * If reading the lowercase EFI variable fail, read the uppercase * variable. */ if (result) { string_upper(prefix_name, prefix_name); snprintf(name, sizeof(name), "%s-%s", prefix_name, kind); result = read_efi_var(name, size, return_data); } return result; }
linux-master
drivers/infiniband/hw/hfi1/efivar.c
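read_hfi1_efi_var() builds the variable name from the device's PCI address plus a kind suffix, and read_efi_var() widens that ASCII name to UCS-2 one byte at a time before calling GetVariable(). The following is a small userspace sketch of just that name handling; the PCI address, the kind string and the buffer sizes are made up, and the uppercase retry path is not reproduced.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[64];
	uint16_t uni_name[64] = { 0 };
	unsigned int domain = 0, bus = 0x18, slot = 0, func = 0;
	const char *kind = "platform";   /* hypothetical kind suffix */
	size_t i;

	/* "<domain>:<bus>:<slot>.<func>-<kind>", e.g. "0000:18:00.0-platform" */
	snprintf(name, sizeof(name), "%04x:%02x:%02x.%x-%s",
		 domain, bus, slot, func, kind);

	/* ASCII to UCS-2 is a 1:1 widening of each byte */
	for (i = 0; name[i] && i < sizeof(uni_name) / sizeof(uni_name[0]) - 1; i++)
		uni_name[i] = (uint8_t)name[i];

	printf("%s (%zu UCS-2 code units)\n", name, i);
	return 0;
}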
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2017 - 2018 Intel Corporation. */ /* * This file contains HFI1 support for VNIC SDMA functionality */ #include "sdma.h" #include "vnic.h" #define HFI1_VNIC_SDMA_Q_ACTIVE BIT(0) #define HFI1_VNIC_SDMA_Q_DEFERRED BIT(1) #define HFI1_VNIC_TXREQ_NAME_LEN 32 #define HFI1_VNIC_SDMA_DESC_WTRMRK 64 /* * struct vnic_txreq - VNIC transmit descriptor * @txreq: sdma transmit request * @sdma: vnic sdma pointer * @skb: skb to send * @pad: pad buffer * @plen: pad length * @pbc_val: pbc value */ struct vnic_txreq { struct sdma_txreq txreq; struct hfi1_vnic_sdma *sdma; struct sk_buff *skb; unsigned char pad[HFI1_VNIC_MAX_PAD]; u16 plen; __le64 pbc_val; }; static void vnic_sdma_complete(struct sdma_txreq *txreq, int status) { struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq); struct hfi1_vnic_sdma *vnic_sdma = tx->sdma; sdma_txclean(vnic_sdma->dd, txreq); dev_kfree_skb_any(tx->skb); kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx); } static noinline int build_vnic_ulp_payload(struct sdma_engine *sde, struct vnic_txreq *tx) { int i, ret = 0; ret = sdma_txadd_kvaddr( sde->dd, &tx->txreq, tx->skb->data, skb_headlen(tx->skb)); if (unlikely(ret)) goto bail_txadd; for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i]; /* combine physically continuous fragments later? */ ret = sdma_txadd_page(sde->dd, &tx->txreq, skb_frag_page(frag), skb_frag_off(frag), skb_frag_size(frag), NULL, NULL, NULL); if (unlikely(ret)) goto bail_txadd; } if (tx->plen) ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq, tx->pad + HFI1_VNIC_MAX_PAD - tx->plen, tx->plen); bail_txadd: return ret; } static int build_vnic_tx_desc(struct sdma_engine *sde, struct vnic_txreq *tx, u64 pbc) { int ret = 0; u16 hdrbytes = 2 << 2; /* PBC */ ret = sdma_txinit_ahg( &tx->txreq, 0, hdrbytes + tx->skb->len + tx->plen, 0, 0, NULL, 0, vnic_sdma_complete); if (unlikely(ret)) goto bail_txadd; /* add pbc */ tx->pbc_val = cpu_to_le64(pbc); ret = sdma_txadd_kvaddr( sde->dd, &tx->txreq, &tx->pbc_val, hdrbytes); if (unlikely(ret)) goto bail_txadd; /* add the ulp payload */ ret = build_vnic_ulp_payload(sde, tx); bail_txadd: return ret; } /* setup the last plen bypes of pad */ static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen) { pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN; } int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx, struct hfi1_vnic_vport_info *vinfo, struct sk_buff *skb, u64 pbc, u8 plen) { struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; struct sdma_engine *sde = vnic_sdma->sde; struct vnic_txreq *tx; int ret = -ECOMM; if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE)) goto tx_err; if (unlikely(!sde || !sdma_running(sde))) goto tx_err; tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC); if (unlikely(!tx)) { ret = -ENOMEM; goto tx_err; } tx->sdma = vnic_sdma; tx->skb = skb; hfi1_vnic_update_pad(tx->pad, plen); tx->plen = plen; ret = build_vnic_tx_desc(sde, tx, pbc); if (unlikely(ret)) goto free_desc; ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait), &tx->txreq, vnic_sdma->pkts_sent); /* When -ECOMM, sdma callback will be called with ABORT status */ if (unlikely(ret && unlikely(ret != -ECOMM))) goto free_desc; if (!ret) { vnic_sdma->pkts_sent = true; iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait); } return ret; free_desc: sdma_txclean(dd, &tx->txreq); kmem_cache_free(dd->vnic.txreq_cache, tx); tx_err: if (ret != -EBUSY) 
dev_kfree_skb_any(skb); else vnic_sdma->pkts_sent = false; return ret; } /* * hfi1_vnic_sdma_sleep - vnic sdma sleep function * * This function gets called from sdma_send_txreq() when there are not enough * sdma descriptors available to send the packet. It adds Tx queue's wait * structure to sdma engine's dmawait list to be woken up when descriptors * become available. */ static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent) { struct hfi1_vnic_sdma *vnic_sdma = container_of(wait->iow, struct hfi1_vnic_sdma, wait); write_seqlock(&sde->waitlock); if (sdma_progress(sde, seq, txreq)) { write_sequnlock(&sde->waitlock); return -EAGAIN; } vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED; if (list_empty(&vnic_sdma->wait.list)) { iowait_get_priority(wait->iow); iowait_queue(pkts_sent, wait->iow, &sde->dmawait); } write_sequnlock(&sde->waitlock); return -EBUSY; } /* * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function * * This function gets called when SDMA descriptors becomes available and Tx * queue's wait structure was previously added to sdma engine's dmawait list. * It notifies the upper driver about Tx queue wakeup. */ static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason) { struct hfi1_vnic_sdma *vnic_sdma = container_of(wait, struct hfi1_vnic_sdma, wait); struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo; vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE; if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx)) netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx); }; inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo, u8 q_idx) { struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE); } void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo) { int i; for (i = 0; i < vinfo->num_tx_q; i++) { struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i]; iowait_init(&vnic_sdma->wait, 0, NULL, NULL, hfi1_vnic_sdma_sleep, hfi1_vnic_sdma_wakeup, NULL, NULL); vnic_sdma->sde = &vinfo->dd->per_sdma[i]; vnic_sdma->dd = vinfo->dd; vnic_sdma->vinfo = vinfo; vnic_sdma->q_idx = i; vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE; /* Add a free descriptor watermark for wakeups */ if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) { struct iowait_work *work; INIT_LIST_HEAD(&vnic_sdma->stx.list); vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK; work = iowait_get_ib_work(&vnic_sdma->wait); list_add_tail(&vnic_sdma->stx.list, &work->tx_head); } } } int hfi1_vnic_txreq_init(struct hfi1_devdata *dd) { char buf[HFI1_VNIC_TXREQ_NAME_LEN]; snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit); dd->vnic.txreq_cache = kmem_cache_create(buf, sizeof(struct vnic_txreq), 0, SLAB_HWCACHE_ALIGN, NULL); if (!dd->vnic.txreq_cache) return -ENOMEM; return 0; } void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd) { kmem_cache_destroy(dd->vnic.txreq_cache); dd->vnic.txreq_cache = NULL; }
linux-master
drivers/infiniband/hw/hfi1/vnic_sdma.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2020 Cornelis Networks, Inc. * Copyright(c) 2016 - 2017 Intel Corporation. */ #include <linux/list.h> #include <linux/rculist.h> #include <linux/mmu_notifier.h> #include <linux/interval_tree_generic.h> #include <linux/sched/mm.h> #include "mmu_rb.h" #include "trace.h" static unsigned long mmu_node_start(struct mmu_rb_node *); static unsigned long mmu_node_last(struct mmu_rb_node *); static int mmu_notifier_range_start(struct mmu_notifier *, const struct mmu_notifier_range *); static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, unsigned long, unsigned long); static void release_immediate(struct kref *refcount); static void handle_remove(struct work_struct *work); static const struct mmu_notifier_ops mn_opts = { .invalidate_range_start = mmu_notifier_range_start, }; INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last, mmu_node_start, mmu_node_last, static, __mmu_int_rb); static unsigned long mmu_node_start(struct mmu_rb_node *node) { return node->addr & PAGE_MASK; } static unsigned long mmu_node_last(struct mmu_rb_node *node) { return PAGE_ALIGN(node->addr + node->len) - 1; } int hfi1_mmu_rb_register(void *ops_arg, struct mmu_rb_ops *ops, struct workqueue_struct *wq, struct mmu_rb_handler **handler) { struct mmu_rb_handler *h; void *free_ptr; int ret; free_ptr = kzalloc(sizeof(*h) + cache_line_size() - 1, GFP_KERNEL); if (!free_ptr) return -ENOMEM; h = PTR_ALIGN(free_ptr, cache_line_size()); h->root = RB_ROOT_CACHED; h->ops = ops; h->ops_arg = ops_arg; INIT_HLIST_NODE(&h->mn.hlist); spin_lock_init(&h->lock); h->mn.ops = &mn_opts; INIT_WORK(&h->del_work, handle_remove); INIT_LIST_HEAD(&h->del_list); INIT_LIST_HEAD(&h->lru_list); h->wq = wq; h->free_ptr = free_ptr; ret = mmu_notifier_register(&h->mn, current->mm); if (ret) { kfree(free_ptr); return ret; } *handler = h; return 0; } void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) { struct mmu_rb_node *rbnode; struct rb_node *node; unsigned long flags; struct list_head del_list; /* Prevent freeing of mm until we are completely finished. */ mmgrab(handler->mn.mm); /* Unregister first so we don't get any more notifications. */ mmu_notifier_unregister(&handler->mn, handler->mn.mm); /* * Make sure the wq delete handler is finished running. It will not * be triggered once the mmu notifiers are unregistered above. */ flush_work(&handler->del_work); INIT_LIST_HEAD(&del_list); spin_lock_irqsave(&handler->lock, flags); while ((node = rb_first_cached(&handler->root))) { rbnode = rb_entry(node, struct mmu_rb_node, node); rb_erase_cached(node, &handler->root); /* move from LRU list to delete list */ list_move(&rbnode->list, &del_list); } spin_unlock_irqrestore(&handler->lock, flags); while (!list_empty(&del_list)) { rbnode = list_first_entry(&del_list, struct mmu_rb_node, list); list_del(&rbnode->list); kref_put(&rbnode->refcount, release_immediate); } /* Now the mm may be freed. 
*/ mmdrop(handler->mn.mm); kfree(handler->free_ptr); } int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, struct mmu_rb_node *mnode) { struct mmu_rb_node *node; unsigned long flags; int ret = 0; trace_hfi1_mmu_rb_insert(mnode); if (current->mm != handler->mn.mm) return -EPERM; spin_lock_irqsave(&handler->lock, flags); node = __mmu_rb_search(handler, mnode->addr, mnode->len); if (node) { ret = -EEXIST; goto unlock; } __mmu_int_rb_insert(mnode, &handler->root); list_add_tail(&mnode->list, &handler->lru_list); mnode->handler = handler; unlock: spin_unlock_irqrestore(&handler->lock, flags); return ret; } /* Caller must hold handler lock */ struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler, unsigned long addr, unsigned long len) { struct mmu_rb_node *node; trace_hfi1_mmu_rb_search(addr, len); node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1); if (node) list_move_tail(&node->list, &handler->lru_list); return node; } /* Caller must hold handler lock */ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, unsigned long addr, unsigned long len) { struct mmu_rb_node *node = NULL; trace_hfi1_mmu_rb_search(addr, len); if (!handler->ops->filter) { node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1); } else { for (node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1); node; node = __mmu_int_rb_iter_next(node, addr, (addr + len) - 1)) { if (handler->ops->filter(node, addr, len)) return node; } } return node; } /* * Must NOT call while holding mnode->handler->lock. * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a * spinlock. */ static void release_immediate(struct kref *refcount) { struct mmu_rb_node *mnode = container_of(refcount, struct mmu_rb_node, refcount); trace_hfi1_mmu_release_node(mnode); mnode->handler->ops->remove(mnode->handler->ops_arg, mnode); } /* Caller must hold mnode->handler->lock */ static void release_nolock(struct kref *refcount) { struct mmu_rb_node *mnode = container_of(refcount, struct mmu_rb_node, refcount); list_move(&mnode->list, &mnode->handler->del_list); queue_work(mnode->handler->wq, &mnode->handler->del_work); } /* * struct mmu_rb_node->refcount kref_put() callback. * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues * handler->del_work on handler->wq. * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root. * Acquires mmu_rb_node->handler->lock; do not call while already holding * handler->lock. 
*/ void hfi1_mmu_rb_release(struct kref *refcount) { struct mmu_rb_node *mnode = container_of(refcount, struct mmu_rb_node, refcount); struct mmu_rb_handler *handler = mnode->handler; unsigned long flags; spin_lock_irqsave(&handler->lock, flags); list_move(&mnode->list, &mnode->handler->del_list); spin_unlock_irqrestore(&handler->lock, flags); queue_work(handler->wq, &handler->del_work); } void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg) { struct mmu_rb_node *rbnode, *ptr; struct list_head del_list; unsigned long flags; bool stop = false; if (current->mm != handler->mn.mm) return; INIT_LIST_HEAD(&del_list); spin_lock_irqsave(&handler->lock, flags); list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) { /* refcount == 1 implies mmu_rb_handler has only rbnode ref */ if (kref_read(&rbnode->refcount) > 1) continue; if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg, &stop)) { __mmu_int_rb_remove(rbnode, &handler->root); /* move from LRU list to delete list */ list_move(&rbnode->list, &del_list); } if (stop) break; } spin_unlock_irqrestore(&handler->lock, flags); list_for_each_entry_safe(rbnode, ptr, &del_list, list) { trace_hfi1_mmu_rb_evict(rbnode); kref_put(&rbnode->refcount, release_immediate); } } static int mmu_notifier_range_start(struct mmu_notifier *mn, const struct mmu_notifier_range *range) { struct mmu_rb_handler *handler = container_of(mn, struct mmu_rb_handler, mn); struct rb_root_cached *root = &handler->root; struct mmu_rb_node *node, *ptr = NULL; unsigned long flags; spin_lock_irqsave(&handler->lock, flags); for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1); node; node = ptr) { /* Guard against node removal. */ ptr = __mmu_int_rb_iter_next(node, range->start, range->end - 1); trace_hfi1_mmu_mem_invalidate(node); /* Remove from rb tree and lru_list. */ __mmu_int_rb_remove(node, root); list_del_init(&node->list); kref_put(&node->refcount, release_nolock); } spin_unlock_irqrestore(&handler->lock, flags); return 0; } /* * Work queue function to remove all nodes that have been queued up to * be removed. The key feature is that mm->mmap_lock is not being held * and the remove callback can sleep while taking it, if needed. */ static void handle_remove(struct work_struct *work) { struct mmu_rb_handler *handler = container_of(work, struct mmu_rb_handler, del_work); struct list_head del_list; unsigned long flags; struct mmu_rb_node *node; /* remove anything that is queued to get removed */ spin_lock_irqsave(&handler->lock, flags); list_replace_init(&handler->del_list, &del_list); spin_unlock_irqrestore(&handler->lock, flags); while (!list_empty(&del_list)) { node = list_first_entry(&del_list, struct mmu_rb_node, list); list_del(&node->list); trace_hfi1_mmu_release_node(node); handler->ops->remove(handler->ops_arg, node); } }
linux-master
drivers/infiniband/hw/hfi1/mmu_rb.c
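The interval tree in mmu_rb.c keys each registration on a page-aligned range: mmu_node_start() rounds the address down and mmu_node_last() rounds the end up to a page boundary, minus one. A minimal userspace sketch of that rounding, assuming a 4 KiB page size and using hypothetical names:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long node_start(unsigned long addr)
{
	return addr & PAGE_MASK;                 /* round down to page start */
}

static unsigned long node_last(unsigned long addr, unsigned long len)
{
	return PAGE_ALIGN(addr + len) - 1;       /* last byte of the last page */
}

int main(void)
{
	unsigned long addr = 0x12345, len = 0x100;

	printf("[%#lx, %#lx]\n", node_start(addr), node_last(addr, len));
	/* prints [0x12000, 0x12fff]: the whole page range the buffer touches */
	return 0;
}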
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2018 Intel Corporation. */ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/bitmap.h> #include "debugfs.h" #include "fault.h" #include "trace.h" #define HFI1_FAULT_DIR_TX BIT(0) #define HFI1_FAULT_DIR_RX BIT(1) #define HFI1_FAULT_DIR_TXRX (HFI1_FAULT_DIR_TX | HFI1_FAULT_DIR_RX) static void *_fault_stats_seq_start(struct seq_file *s, loff_t *pos) { struct hfi1_opcode_stats_perctx *opstats; if (*pos >= ARRAY_SIZE(opstats->stats)) return NULL; return pos; } static void *_fault_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct hfi1_opcode_stats_perctx *opstats; ++*pos; if (*pos >= ARRAY_SIZE(opstats->stats)) return NULL; return pos; } static void _fault_stats_seq_stop(struct seq_file *s, void *v) { } static int _fault_stats_seq_show(struct seq_file *s, void *v) { loff_t *spos = v; loff_t i = *spos, j; u64 n_packets = 0, n_bytes = 0; struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private; struct hfi1_devdata *dd = dd_from_dev(ibd); struct hfi1_ctxtdata *rcd; for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) { rcd = hfi1_rcd_get_by_index(dd, j); if (rcd) { n_packets += rcd->opstats->stats[i].n_packets; n_bytes += rcd->opstats->stats[i].n_bytes; } hfi1_rcd_put(rcd); } for_each_possible_cpu(j) { struct hfi1_opcode_stats_perctx *sp = per_cpu_ptr(dd->tx_opstats, j); n_packets += sp->stats[i].n_packets; n_bytes += sp->stats[i].n_bytes; } if (!n_packets && !n_bytes) return SEQ_SKIP; if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i]) return SEQ_SKIP; seq_printf(s, "%02llx %llu/%llu (faults rx:%llu faults: tx:%llu)\n", i, (unsigned long long)n_packets, (unsigned long long)n_bytes, (unsigned long long)ibd->fault->n_rxfaults[i], (unsigned long long)ibd->fault->n_txfaults[i]); return 0; } DEBUGFS_SEQ_FILE_OPS(fault_stats); DEBUGFS_SEQ_FILE_OPEN(fault_stats); DEBUGFS_FILE_OPS(fault_stats); static int fault_opcodes_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return nonseekable_open(inode, file); } static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, size_t len, loff_t *pos) { ssize_t ret = 0; /* 1280 = 256 opcodes * 4 chars/opcode + 255 commas + NULL */ size_t copy, datalen = 1280; char *data, *token, *ptr, *end; struct fault *fault = file->private_data; data = kcalloc(datalen, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; copy = min(len, datalen - 1); if (copy_from_user(data, buf, copy)) { ret = -EFAULT; goto free_data; } ret = debugfs_file_get(file->f_path.dentry); if (unlikely(ret)) goto free_data; ptr = data; token = ptr; for (ptr = data; *ptr; ptr = end + 1, token = ptr) { char *dash; unsigned long range_start, range_end, i; bool remove = false; unsigned long bound = 1U << BITS_PER_BYTE; end = strchr(ptr, ','); if (end) *end = '\0'; if (token[0] == '-') { remove = true; token++; } dash = strchr(token, '-'); if (dash) *dash = '\0'; if (kstrtoul(token, 0, &range_start)) break; if (dash) { token = dash + 1; if (kstrtoul(token, 0, &range_end)) break; } else { range_end = range_start; } if (range_start == range_end && range_start == -1UL) { bitmap_zero(fault->opcodes, sizeof(fault->opcodes) * BITS_PER_BYTE); break; } /* Check the inputs */ if (range_start >= bound || range_end >= bound) break; for (i = range_start; i <= range_end; i++) { if (remove) clear_bit(i, fault->opcodes); else set_bit(i, fault->opcodes); } if (!end) break; } ret = len; 
debugfs_file_put(file->f_path.dentry); free_data: kfree(data); return ret; } static ssize_t fault_opcodes_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { ssize_t ret = 0; char *data; size_t datalen = 1280, size = 0; /* see fault_opcodes_write() */ unsigned long bit = 0, zero = 0; struct fault *fault = file->private_data; size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE; data = kcalloc(datalen, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; ret = debugfs_file_get(file->f_path.dentry); if (unlikely(ret)) goto free_data; bit = find_first_bit(fault->opcodes, bitsize); while (bit < bitsize) { zero = find_next_zero_bit(fault->opcodes, bitsize, bit); if (zero - 1 != bit) size += scnprintf(data + size, datalen - size - 1, "0x%lx-0x%lx,", bit, zero - 1); else size += scnprintf(data + size, datalen - size - 1, "0x%lx,", bit); bit = find_next_bit(fault->opcodes, bitsize, zero); } debugfs_file_put(file->f_path.dentry); data[size - 1] = '\n'; data[size] = '\0'; ret = simple_read_from_buffer(buf, len, pos, data, size); free_data: kfree(data); return ret; } static const struct file_operations __fault_opcodes_fops = { .owner = THIS_MODULE, .open = fault_opcodes_open, .read = fault_opcodes_read, .write = fault_opcodes_write, .llseek = no_llseek }; void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd) { if (ibd->fault) debugfs_remove_recursive(ibd->fault->dir); kfree(ibd->fault); ibd->fault = NULL; } int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd) { struct dentry *parent = ibd->hfi1_ibdev_dbg; struct dentry *fault_dir; ibd->fault = kzalloc(sizeof(*ibd->fault), GFP_KERNEL); if (!ibd->fault) return -ENOMEM; ibd->fault->attr.interval = 1; ibd->fault->attr.require_end = ULONG_MAX; ibd->fault->attr.stacktrace_depth = 32; ibd->fault->attr.dname = NULL; ibd->fault->attr.verbose = 0; ibd->fault->enable = false; ibd->fault->opcode = false; ibd->fault->fault_skip = 0; ibd->fault->skip = 0; ibd->fault->direction = HFI1_FAULT_DIR_TXRX; ibd->fault->suppress_err = false; bitmap_zero(ibd->fault->opcodes, sizeof(ibd->fault->opcodes) * BITS_PER_BYTE); fault_dir = fault_create_debugfs_attr("fault", parent, &ibd->fault->attr); if (IS_ERR(fault_dir)) { kfree(ibd->fault); ibd->fault = NULL; return -ENOENT; } ibd->fault->dir = fault_dir; debugfs_create_file("fault_stats", 0444, fault_dir, ibd, &_fault_stats_file_ops); debugfs_create_bool("enable", 0600, fault_dir, &ibd->fault->enable); debugfs_create_bool("suppress_err", 0600, fault_dir, &ibd->fault->suppress_err); debugfs_create_bool("opcode_mode", 0600, fault_dir, &ibd->fault->opcode); debugfs_create_file("opcodes", 0600, fault_dir, ibd->fault, &__fault_opcodes_fops); debugfs_create_u64("skip_pkts", 0600, fault_dir, &ibd->fault->fault_skip); debugfs_create_u64("skip_usec", 0600, fault_dir, &ibd->fault->fault_skip_usec); debugfs_create_u8("direction", 0600, fault_dir, &ibd->fault->direction); return 0; } bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd) { if (ibd->fault) return ibd->fault->suppress_err; return false; } static bool __hfi1_should_fault(struct hfi1_ibdev *ibd, u32 opcode, u8 direction) { bool ret = false; if (!ibd->fault || !ibd->fault->enable) return false; if (!(ibd->fault->direction & direction)) return false; if (ibd->fault->opcode) { if (bitmap_empty(ibd->fault->opcodes, (sizeof(ibd->fault->opcodes) * BITS_PER_BYTE))) return false; if (!(test_bit(opcode, ibd->fault->opcodes))) return false; } if (ibd->fault->fault_skip_usec && time_before(jiffies, ibd->fault->skip_usec)) return false; if (ibd->fault->fault_skip && 
ibd->fault->skip) { ibd->fault->skip--; return false; } ret = should_fail(&ibd->fault->attr, 1); if (ret) { ibd->fault->skip = ibd->fault->fault_skip; ibd->fault->skip_usec = jiffies + usecs_to_jiffies(ibd->fault->fault_skip_usec); } return ret; } bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp, u32 opcode) { struct hfi1_ibdev *ibd = to_idev(qp->ibqp.device); if (__hfi1_should_fault(ibd, opcode, HFI1_FAULT_DIR_TX)) { trace_hfi1_fault_opcode(qp, opcode); ibd->fault->n_txfaults[opcode]++; return true; } return false; } bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet) { struct hfi1_ibdev *ibd = &packet->rcd->dd->verbs_dev; if (__hfi1_should_fault(ibd, packet->opcode, HFI1_FAULT_DIR_RX)) { trace_hfi1_fault_packet(packet); ibd->fault->n_rxfaults[packet->opcode]++; return true; } return false; }
linux-master
drivers/infiniband/hw/hfi1/fault.c
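fault_opcodes_write() accepts a comma-separated list of opcode values or ranges, where a leading '-' clears bits instead of setting them. Here is a minimal userspace sketch of that syntax, assuming a bool array can stand in for the kernel bitmap; the function and variable names are made up and the driver's special "-1" clear-all token is omitted.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_OPCODES 256

static void parse_opcodes(char *spec, bool opcodes[NUM_OPCODES])
{
	char *token;

	for (token = strtok(spec, ","); token; token = strtok(NULL, ",")) {
		bool remove = false;
		unsigned long start, end;
		char *dash;

		if (token[0] == '-') {          /* leading '-' means "clear" */
			remove = true;
			token++;
		}
		dash = strchr(token, '-');
		if (dash)
			*dash = '\0';
		start = strtoul(token, NULL, 0);
		end = dash ? strtoul(dash + 1, NULL, 0) : start;
		if (start >= NUM_OPCODES || end >= NUM_OPCODES)
			break;                  /* reject out-of-range input */
		for (unsigned long i = start; i <= end; i++)
			opcodes[i] = !remove;
	}
}

int main(void)
{
	bool opcodes[NUM_OPCODES] = { false };
	char spec[] = "0x00-0x1f,0x64,-0x04";

	parse_opcodes(spec, opcodes);
	for (int i = 0; i < NUM_OPCODES; i++)
		if (opcodes[i])
			printf("0x%02x ", i);
	printf("\n");
	return 0;
}

Feeding "0x00-0x1f,0x64,-0x04" through this parser sets opcodes 0x00-0x1f and 0x64 and then clears 0x04, which is the kind of state the read side reports back as ranges.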
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015-2017 Intel Corporation. */ #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/device.h> #include <linux/module.h> #include "hfi.h" static unsigned long cache_size = 256; module_param(cache_size, ulong, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)"); /* * Determine whether the caller can pin pages. * * This function should be used in the implementation of buffer caches. * The cache implementation should call this function prior to attempting * to pin buffer pages in order to determine whether they should do so. * The function computes cache limits based on the configured ulimit and * cache size. Use of this function is especially important for caches * which are not limited in any other way (e.g. by HW resources) and, thus, * could keeping caching buffers. * */ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm, u32 nlocked, u32 npages) { unsigned long ulimit_pages; unsigned long cache_limit_pages; unsigned int usr_ctxts; /* * Perform RLIMIT_MEMLOCK based checks unless CAP_IPC_LOCK is present. */ if (!capable(CAP_IPC_LOCK)) { ulimit_pages = DIV_ROUND_DOWN_ULL(rlimit(RLIMIT_MEMLOCK), PAGE_SIZE); /* * Pinning these pages would exceed this process's locked memory * limit. */ if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages) return false; /* * Only allow 1/4 of the user's RLIMIT_MEMLOCK to be used for HFI * caches. This fraction is then equally distributed among all * existing user contexts. Note that if RLIMIT_MEMLOCK is * 'unlimited' (-1), the value of this limit will be > 2^42 pages * (2^64 / 2^12 / 2^8 / 2^2). * * The effectiveness of this check may be reduced if I/O occurs on * some user contexts before all user contexts are created. This * check assumes that this process is the only one using this * context (e.g., the corresponding fd was not passed to another * process for concurrent access) as there is no per-context, * per-process tracking of pinned pages. It also assumes that each * user context has only one cache to limit. */ usr_ctxts = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt; if (nlocked + npages > (ulimit_pages / usr_ctxts / 4)) return false; } /* * Pinning these pages would exceed the size limit for this cache. */ cache_limit_pages = cache_size * (1024 * 1024) / PAGE_SIZE; if (nlocked + npages > cache_limit_pages) return false; return true; } int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages, bool writable, struct page **pages) { int ret; unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0); ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages); if (ret < 0) return ret; atomic64_add(ret, &mm->pinned_vm); return ret; } void hfi1_release_user_pages(struct mm_struct *mm, struct page **p, size_t npages, bool dirty) { unpin_user_pages_dirty_lock(p, npages, dirty); if (mm) { /* during close after signal, mm can be NULL */ atomic64_sub(npages, &mm->pinned_vm); } }
linux-master
drivers/infiniband/hw/hfi1/user_pages.c
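hfi1_can_pin_pages() combines two caps: at most a quarter of RLIMIT_MEMLOCK split evenly across the user contexts, and the cache_size module parameter. A minimal userspace sketch of that arithmetic follows, with hypothetical names and example numbers; the CAP_IPC_LOCK bypass is left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool can_pin_pages(uint64_t memlock_bytes, unsigned int page_size,
                          unsigned int usr_ctxts, uint64_t cache_size_mb,
                          uint64_t pinned, uint64_t nlocked, uint64_t npages)
{
	uint64_t ulimit_pages = memlock_bytes / page_size;
	uint64_t cache_limit_pages = cache_size_mb * 1024 * 1024 / page_size;

	/* process-wide RLIMIT_MEMLOCK check */
	if (pinned + npages > ulimit_pages)
		return false;
	/* one quarter of the memlock limit, split across user contexts */
	if (nlocked + npages > ulimit_pages / usr_ctxts / 4)
		return false;
	/* per-cache size limit (the cache_size module parameter, in MB) */
	if (nlocked + npages > cache_limit_pages)
		return false;
	return true;
}

int main(void)
{
	/* 64 MiB memlock limit, 4 KiB pages, 8 user contexts, 256 MB cache */
	printf("%d\n", can_pin_pages(64ULL << 20, 4096, 8, 256, 0, 0, 300));
	printf("%d\n", can_pin_pages(64ULL << 20, 4096, 8, 256, 0, 0, 600));
	return 0;
}

With these example numbers the per-context budget is 16384 / 8 / 4 = 512 pages, so pinning 300 pages is allowed and pinning 600 is refused.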
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ #include "hfi.h" /* additive distance between non-SOP and SOP space */ #define SOP_DISTANCE (TXE_PIO_SIZE / 2) #define PIO_BLOCK_MASK (PIO_BLOCK_SIZE - 1) /* number of QUADWORDs in a block */ #define PIO_BLOCK_QWS (PIO_BLOCK_SIZE / sizeof(u64)) /** * pio_copy - copy data block to MMIO space * @dd: hfi1 dev data * @pbuf: a number of blocks allocated within a PIO send context * @pbc: PBC to send * @from: source, must be 8 byte aligned * @count: number of DWORD (32-bit) quantities to copy from source * * Copy data from source to PIO Send Buffer memory, 8 bytes at a time. * Must always write full BLOCK_SIZE bytes blocks. The first block must * be written to the corresponding SOP=1 address. * * Known: * o pbuf->start always starts on a block boundary * o pbuf can wrap only at a block boundary */ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, const void *from, size_t count) { void __iomem *dest = pbuf->start + SOP_DISTANCE; void __iomem *send = dest + PIO_BLOCK_SIZE; void __iomem *dend; /* 8-byte data end */ /* write the PBC */ writeq(pbc, dest); dest += sizeof(u64); /* calculate where the QWORD data ends - in SOP=1 space */ dend = dest + ((count >> 1) * sizeof(u64)); if (dend < send) { /* * all QWORD data is within the SOP block, does *not* * reach the end of the SOP block */ while (dest < dend) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } /* * No boundary checks are needed here: * 0. We're not on the SOP block boundary * 1. The possible DWORD dangle will still be within * the SOP block * 2. We cannot wrap except on a block boundary. */ } else { /* QWORD data extends _to_ or beyond the SOP block */ /* write 8-byte SOP chunk data */ while (dest < send) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } /* drop out of the SOP range */ dest -= SOP_DISTANCE; dend -= SOP_DISTANCE; /* * If the wrap comes before or matches the data end, * copy until until the wrap, then wrap. * * If the data ends at the end of the SOP above and * the buffer wraps, then pbuf->end == dend == dest * and nothing will get written, but we will wrap in * case there is a dangling DWORD. */ if (pbuf->end <= dend) { while (dest < pbuf->end) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } dest -= pbuf->sc->size; dend -= pbuf->sc->size; } /* write 8-byte non-SOP, non-wrap chunk data */ while (dest < dend) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } } /* at this point we have wrapped if we are going to wrap */ /* write dangling u32, if any */ if (count & 1) { union mix val; val.val64 = 0; val.val32[0] = *(u32 *)from; writeq(val.val64, dest); dest += sizeof(u64); } /* * fill in rest of block, no need to check pbuf->end * as we only wrap on a block boundary */ while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) { writeq(0, dest); dest += sizeof(u64); } /* finished with this buffer */ this_cpu_dec(*pbuf->sc->buffers_allocated); preempt_enable(); } /* * Handle carry bytes using shifts and masks. * * NOTE: the value the unused portion of carry is expected to always be zero. */ /* * "zero" shift - bit shift used to zero out upper bytes. Input is * the count of LSB bytes to preserve. */ #define zshift(x) (8 * (8 - (x))) /* * "merge" shift - bit shift used to merge with carry bytes. Input is * the LSB byte count to move beyond. */ #define mshift(x) (8 * (x)) /* * Jump copy - no-loop copy for < 8 bytes. 
*/ static inline void jcopy(u8 *dest, const u8 *src, u32 n) { switch (n) { case 7: *dest++ = *src++; fallthrough; case 6: *dest++ = *src++; fallthrough; case 5: *dest++ = *src++; fallthrough; case 4: *dest++ = *src++; fallthrough; case 3: *dest++ = *src++; fallthrough; case 2: *dest++ = *src++; fallthrough; case 1: *dest++ = *src++; } } /* * Read nbytes from "from" and place them in the low bytes * of pbuf->carry. Other bytes are left as-is. Any previous * value in pbuf->carry is lost. * * NOTES: * o do not read from from if nbytes is zero * o from may _not_ be u64 aligned. */ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from, unsigned int nbytes) { pbuf->carry.val64 = 0; jcopy(&pbuf->carry.val8[0], from, nbytes); pbuf->carry_bytes = nbytes; } /* * Read nbytes bytes from "from" and put them at the end of pbuf->carry. * It is expected that the extra read does not overfill carry. * * NOTES: * o from may _not_ be u64 aligned * o nbytes may span a QW boundary */ static inline void read_extra_bytes(struct pio_buf *pbuf, const void *from, unsigned int nbytes) { jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes); pbuf->carry_bytes += nbytes; } /* * Write a quad word using parts of pbuf->carry and the next 8 bytes of src. * Put the unused part of the next 8 bytes of src into the LSB bytes of * pbuf->carry with the upper bytes zeroed.. * * NOTES: * o result must keep unused bytes zeroed * o src must be u64 aligned */ static inline void merge_write8( struct pio_buf *pbuf, void __iomem *dest, const void *src) { u64 new, temp; new = *(u64 *)src; temp = pbuf->carry.val64 | (new << mshift(pbuf->carry_bytes)); writeq(temp, dest); pbuf->carry.val64 = new >> zshift(pbuf->carry_bytes); } /* * Write a quad word using all bytes of carry. */ static inline void carry8_write8(union mix carry, void __iomem *dest) { writeq(carry.val64, dest); } /* * Write a quad word using all the valid bytes of carry. If carry * has zero valid bytes, nothing is written. * Returns 0 on nothing written, non-zero on quad word written. */ static inline int carry_write8(struct pio_buf *pbuf, void __iomem *dest) { if (pbuf->carry_bytes) { /* unused bytes are always kept zeroed, so just write */ writeq(pbuf->carry.val64, dest); return 1; } return 0; } /* * Segmented PIO Copy - start * * Start a PIO copy. * * @pbuf: destination buffer * @pbc: the PBC for the PIO buffer * @from: data source, QWORD aligned * @nbytes: bytes to copy */ void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc, const void *from, size_t nbytes) { void __iomem *dest = pbuf->start + SOP_DISTANCE; void __iomem *send = dest + PIO_BLOCK_SIZE; void __iomem *dend; /* 8-byte data end */ writeq(pbc, dest); dest += sizeof(u64); /* calculate where the QWORD data ends - in SOP=1 space */ dend = dest + ((nbytes >> 3) * sizeof(u64)); if (dend < send) { /* * all QWORD data is within the SOP block, does *not* * reach the end of the SOP block */ while (dest < dend) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } /* * No boundary checks are needed here: * 0. We're not on the SOP block boundary * 1. The possible DWORD dangle will still be within * the SOP block * 2. We cannot wrap except on a block boundary. 
*/ } else { /* QWORD data extends _to_ or beyond the SOP block */ /* write 8-byte SOP chunk data */ while (dest < send) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } /* drop out of the SOP range */ dest -= SOP_DISTANCE; dend -= SOP_DISTANCE; /* * If the wrap comes before or matches the data end, * copy until until the wrap, then wrap. * * If the data ends at the end of the SOP above and * the buffer wraps, then pbuf->end == dend == dest * and nothing will get written, but we will wrap in * case there is a dangling DWORD. */ if (pbuf->end <= dend) { while (dest < pbuf->end) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } dest -= pbuf->sc->size; dend -= pbuf->sc->size; } /* write 8-byte non-SOP, non-wrap chunk data */ while (dest < dend) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } } /* at this point we have wrapped if we are going to wrap */ /* ...but it doesn't matter as we're done writing */ /* save dangling bytes, if any */ read_low_bytes(pbuf, from, nbytes & 0x7); pbuf->qw_written = 1 /*PBC*/ + (nbytes >> 3); } /* * Mid copy helper, "mixed case" - source is 64-bit aligned but carry * bytes are non-zero. * * Whole u64s must be written to the chip, so bytes must be manually merged. * * @pbuf: destination buffer * @from: data source, is QWORD aligned. * @nbytes: bytes to copy * * Must handle nbytes < 8. */ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes) { void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); void __iomem *dend; /* 8-byte data end */ unsigned long qw_to_write = nbytes >> 3; unsigned long bytes_left = nbytes & 0x7; /* calculate 8-byte data end */ dend = dest + (qw_to_write * sizeof(u64)); if (pbuf->qw_written < PIO_BLOCK_QWS) { /* * Still within SOP block. We don't need to check for * wrap because we are still in the first block and * can only wrap on block boundaries. */ void __iomem *send; /* SOP end */ void __iomem *xend; /* * calculate the end of data or end of block, whichever * comes first */ send = pbuf->start + PIO_BLOCK_SIZE; xend = min(send, dend); /* shift up to SOP=1 space */ dest += SOP_DISTANCE; xend += SOP_DISTANCE; /* write 8-byte chunk data */ while (dest < xend) { merge_write8(pbuf, dest, from); from += sizeof(u64); dest += sizeof(u64); } /* shift down to SOP=0 space */ dest -= SOP_DISTANCE; } /* * At this point dest could be (either, both, or neither): * - at dend * - at the wrap */ /* * If the wrap comes before or matches the data end, * copy until until the wrap, then wrap. * * If dest is at the wrap, we will fall into the if, * not do the loop, when wrap. * * If the data ends at the end of the SOP above and * the buffer wraps, then pbuf->end == dend == dest * and nothing will get written. */ if (pbuf->end <= dend) { while (dest < pbuf->end) { merge_write8(pbuf, dest, from); from += sizeof(u64); dest += sizeof(u64); } dest -= pbuf->sc->size; dend -= pbuf->sc->size; } /* write 8-byte non-SOP, non-wrap chunk data */ while (dest < dend) { merge_write8(pbuf, dest, from); from += sizeof(u64); dest += sizeof(u64); } pbuf->qw_written += qw_to_write; /* handle carry and left-over bytes */ if (pbuf->carry_bytes + bytes_left >= 8) { unsigned long nread; /* there is enough to fill another qw - fill carry */ nread = 8 - pbuf->carry_bytes; read_extra_bytes(pbuf, from, nread); /* * One more write - but need to make sure dest is correct. * Check for wrap and the possibility the write * should be in SOP space. 
* * The two checks immediately below cannot both be true, hence * the else. If we have wrapped, we cannot still be within the * first block. Conversely, if we are still in the first block, * we cannot have wrapped. We do the wrap check first as that * is more likely. */ /* adjust if we have wrapped */ if (dest >= pbuf->end) dest -= pbuf->sc->size; /* jump to the SOP range if within the first block */ else if (pbuf->qw_written < PIO_BLOCK_QWS) dest += SOP_DISTANCE; /* flush out full carry */ carry8_write8(pbuf->carry, dest); pbuf->qw_written++; /* now adjust and read the rest of the bytes into carry */ bytes_left -= nread; from += nread; /* from is now not aligned */ read_low_bytes(pbuf, from, bytes_left); } else { /* not enough to fill another qw, append the rest to carry */ read_extra_bytes(pbuf, from, bytes_left); } } /* * Mid copy helper, "straight case" - source pointer is 64-bit aligned * with no carry bytes. * * @pbuf: destination buffer * @from: data source, is QWORD aligned * @nbytes: bytes to copy * * Must handle nbytes < 8. */ static void mid_copy_straight(struct pio_buf *pbuf, const void *from, size_t nbytes) { void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); void __iomem *dend; /* 8-byte data end */ /* calculate 8-byte data end */ dend = dest + ((nbytes >> 3) * sizeof(u64)); if (pbuf->qw_written < PIO_BLOCK_QWS) { /* * Still within SOP block. We don't need to check for * wrap because we are still in the first block and * can only wrap on block boundaries. */ void __iomem *send; /* SOP end */ void __iomem *xend; /* * calculate the end of data or end of block, whichever * comes first */ send = pbuf->start + PIO_BLOCK_SIZE; xend = min(send, dend); /* shift up to SOP=1 space */ dest += SOP_DISTANCE; xend += SOP_DISTANCE; /* write 8-byte chunk data */ while (dest < xend) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } /* shift down to SOP=0 space */ dest -= SOP_DISTANCE; } /* * At this point dest could be (either, both, or neither): * - at dend * - at the wrap */ /* * If the wrap comes before or matches the data end, * copy until until the wrap, then wrap. * * If dest is at the wrap, we will fall into the if, * not do the loop, when wrap. * * If the data ends at the end of the SOP above and * the buffer wraps, then pbuf->end == dend == dest * and nothing will get written. */ if (pbuf->end <= dend) { while (dest < pbuf->end) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } dest -= pbuf->sc->size; dend -= pbuf->sc->size; } /* write 8-byte non-SOP, non-wrap chunk data */ while (dest < dend) { writeq(*(u64 *)from, dest); from += sizeof(u64); dest += sizeof(u64); } /* we know carry_bytes was zero on entry to this routine */ read_low_bytes(pbuf, from, nbytes & 0x7); pbuf->qw_written += nbytes >> 3; } /* * Segmented PIO Copy - middle * * Must handle any aligned tail and any aligned source with any byte count. 
* * @pbuf: a number of blocks allocated within a PIO send context * @from: data source * @nbytes: number of bytes to copy */ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes) { unsigned long from_align = (unsigned long)from & 0x7; if (pbuf->carry_bytes + nbytes < 8) { /* not enough bytes to fill a QW */ read_extra_bytes(pbuf, from, nbytes); return; } if (from_align) { /* misaligned source pointer - align it */ unsigned long to_align; /* bytes to read to align "from" */ to_align = 8 - from_align; /* * In the advance-to-alignment logic below, we do not need * to check if we are using more than nbytes. This is because * if we are here, we already know that carry+nbytes will * fill at least one QW. */ if (pbuf->carry_bytes + to_align < 8) { /* not enough align bytes to fill a QW */ read_extra_bytes(pbuf, from, to_align); from += to_align; nbytes -= to_align; } else { /* bytes to fill carry */ unsigned long to_fill = 8 - pbuf->carry_bytes; /* bytes left over to be read */ unsigned long extra = to_align - to_fill; void __iomem *dest; /* fill carry... */ read_extra_bytes(pbuf, from, to_fill); from += to_fill; nbytes -= to_fill; /* may not be enough valid bytes left to align */ if (extra > nbytes) extra = nbytes; /* ...now write carry */ dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); /* * The two checks immediately below cannot both be * true, hence the else. If we have wrapped, we * cannot still be within the first block. * Conversely, if we are still in the first block, we * cannot have wrapped. We do the wrap check first * as that is more likely. */ /* adjust if we've wrapped */ if (dest >= pbuf->end) dest -= pbuf->sc->size; /* jump to SOP range if within the first block */ else if (pbuf->qw_written < PIO_BLOCK_QWS) dest += SOP_DISTANCE; carry8_write8(pbuf->carry, dest); pbuf->qw_written++; /* read any extra bytes to do final alignment */ /* this will overwrite anything in pbuf->carry */ read_low_bytes(pbuf, from, extra); from += extra; nbytes -= extra; /* * If no bytes are left, return early - we are done. * NOTE: This short-circuit is *required* because * "extra" may have been reduced in size and "from" * is not aligned, as required when leaving this * if block. */ if (nbytes == 0) return; } /* at this point, from is QW aligned */ } if (pbuf->carry_bytes) mid_copy_mix(pbuf, from, nbytes); else mid_copy_straight(pbuf, from, nbytes); } /* * Segmented PIO Copy - end * * Write any remainder (in pbuf->carry) and finish writing the whole block. * * @pbuf: a number of blocks allocated within a PIO send context */ void seg_pio_copy_end(struct pio_buf *pbuf) { void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); /* * The two checks immediately below cannot both be true, hence the * else. If we have wrapped, we cannot still be within the first * block. Conversely, if we are still in the first block, we * cannot have wrapped. We do the wrap check first as that is * more likely. */ /* adjust if we have wrapped */ if (dest >= pbuf->end) dest -= pbuf->sc->size; /* jump to the SOP range if within the first block */ else if (pbuf->qw_written < PIO_BLOCK_QWS) dest += SOP_DISTANCE; /* write final bytes, if any */ if (carry_write8(pbuf, dest)) { dest += sizeof(u64); /* * NOTE: We do not need to recalculate whether dest needs * SOP_DISTANCE or not. * * If we are in the first block and the dangle write * keeps us in the same block, dest will need * to retain SOP_DISTANCE in the loop below. 
* * If we are in the first block and the dangle write pushes * us to the next block, then loop below will not run * and dest is not used. Hence we do not need to update * it. * * If we are past the first block, then SOP_DISTANCE * was never added, so there is nothing to do. */ } /* fill in rest of block */ while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) { writeq(0, dest); dest += sizeof(u64); } /* finished with this buffer */ this_cpu_dec(*pbuf->sc->buffers_allocated); preempt_enable(); }
linux-master
drivers/infiniband/hw/hfi1/pio_copy.c
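The segmented copy keeps up to seven leftover source bytes in pbuf->carry and merges them with the next aligned quad word before each writeq(), using the mshift()/zshift() helpers above. Below is a minimal userspace sketch of that merge, assuming a little-endian host and returning the merged value instead of writing MMIO; all names are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSHIFT(x) (8 * (x))        /* bits occupied by x carry bytes */
#define ZSHIFT(x) (8 * (8 - (x)))  /* shift that keeps x low bytes   */

struct carry_state {
	uint64_t carry;            /* low carry_bytes bytes valid, rest zero */
	unsigned int carry_bytes;
};

static uint64_t merge_write8(struct carry_state *s, const void *src)
{
	uint64_t next, out;

	memcpy(&next, src, sizeof(next));   /* next 8 aligned source bytes */
	out = s->carry | (next << MSHIFT(s->carry_bytes));
	s->carry = s->carry_bytes ? (next >> ZSHIFT(s->carry_bytes)) : 0;
	return out;                         /* would be writeq()'d to the PIO buffer */
}

int main(void)
{
	struct carry_state s = { 0 };
	const uint8_t data[16] = { 1, 2, 3, 4, 5, 6, 7, 8,
				   9, 10, 11, 12, 13, 14, 15, 16 };

	/* pretend 3 bytes were left over from an earlier segment */
	memcpy(&s.carry, data, 3);
	s.carry_bytes = 3;

	uint64_t qw = merge_write8(&s, data + 3);
	printf("qword 0x%016llx carry 0x%016llx (%u bytes)\n",
	       (unsigned long long)qw, (unsigned long long)s.carry,
	       s.carry_bytes);
	return 0;
}

For this input the merged quad word is 0x0807060504030201, i.e. logical bytes 1 through 8, and bytes 9 through 11 stay in the carry for the next write.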
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2019 Intel Corporation. */ #include <linux/net.h> #include <rdma/ib_smi.h> #include "hfi.h" #include "mad.h" #include "verbs_txreq.h" #include "trace_ibhdrs.h" #include "qp.h" /* We support only two types - 9B and 16B for now */ static const hfi1_make_req hfi1_make_ud_req_tbl[2] = { [HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B, [HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B }; /** * ud_loopback - handle send on loopback QPs * @sqp: the sending QP * @swqe: the send work request * * This is called from hfi1_make_ud_req() to forward a WQE addressed * to the same HFI. * Note that the receive interrupt handler may be calling hfi1_ud_rcv() * while this is being called. */ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) { struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); struct hfi1_pportdata *ppd; struct hfi1_qp_priv *priv = sqp->priv; struct rvt_qp *qp; struct rdma_ah_attr *ah_attr; unsigned long flags; struct rvt_sge_state ssge; struct rvt_sge *sge; struct ib_wc wc; u32 length; enum ib_qp_type sqptype, dqptype; rcu_read_lock(); qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp, rvt_get_swqe_remote_qpn(swqe)); if (!qp) { ibp->rvp.n_pkt_drops++; rcu_read_unlock(); return; } sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ? IB_QPT_UD : sqp->ibqp.qp_type; dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? IB_QPT_UD : qp->ibqp.qp_type; if (dqptype != sqptype || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { ibp->rvp.n_pkt_drops++; goto drop; } ah_attr = rvt_get_swqe_ah_attr(swqe); ppd = ppd_from_ibp(ibp); if (qp->ibqp.qp_num > 1) { u16 pkey; u32 slid; u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)]; pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index); slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) & ((1 << ppd->lmc) - 1)); if (unlikely(ingress_pkey_check(ppd, pkey, sc5, qp->s_pkey_index, slid, false))) { hfi1_bad_pkey(ibp, pkey, rdma_ah_get_sl(ah_attr), sqp->ibqp.qp_num, qp->ibqp.qp_num, slid, rdma_ah_get_dlid(ah_attr)); goto drop; } } /* * Check that the qkey matches (except for QP0, see 9.6.1.4.1). * Qkeys with the high order bit set mean use the * qkey from the QP context instead of the WR (see 10.2.5). */ if (qp->ibqp.qp_num) { u32 qkey; qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ? sqp->qkey : rvt_get_swqe_remote_qkey(swqe); if (unlikely(qkey != qp->qkey)) goto drop; /* silently drop per IBTA spec */ } /* * A GRH is expected to precede the data even if not * present on the wire. */ length = swqe->length; memset(&wc, 0, sizeof(wc)); wc.byte_len = length + sizeof(struct ib_grh); if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { wc.wc_flags = IB_WC_WITH_IMM; wc.ex.imm_data = swqe->wr.ex.imm_data; } spin_lock_irqsave(&qp->r_lock, flags); /* * Get the next work request entry to find where to put the data. */ if (qp->r_flags & RVT_R_REUSE_SGE) { qp->r_flags &= ~RVT_R_REUSE_SGE; } else { int ret; ret = rvt_get_rwqe(qp, false); if (ret < 0) { rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); goto bail_unlock; } if (!ret) { if (qp->ibqp.qp_num == 0) ibp->rvp.n_vl15_dropped++; goto bail_unlock; } } /* Silently drop packets which are too big. */ if (unlikely(wc.byte_len > qp->r_len)) { qp->r_flags |= RVT_R_REUSE_SGE; ibp->rvp.n_pkt_drops++; goto bail_unlock; } if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { struct ib_grh grh; struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr)); /* * For loopback packets with extended LIDs, the * sgid_index in the GRH is 0 and the dgid is * OPA GID of the sender. 
While creating a response * to the loopback packet, IB core creates the new * sgid_index from the DGID and that will be the * OPA_GID_INDEX. The new dgid is from the sgid * index and that will be in the IB GID format. * * We now have a case where the sent packet had a * different sgid_index and dgid compared to the * one that was received in response. * * Fix this inconsistency. */ if (priv->hdr_type == HFI1_PKT_TYPE_16B) { if (grd.sgid_index == 0) grd.sgid_index = OPA_GID_INDEX; if (ib_is_opa_gid(&grd.dgid)) grd.dgid.global.interface_id = cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]); } hfi1_make_grh(ibp, &grh, &grd, 0, 0); rvt_copy_sge(qp, &qp->r_sge, &grh, sizeof(grh), true, false); wc.wc_flags |= IB_WC_GRH; } else { rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true); } ssge.sg_list = swqe->sg_list + 1; ssge.sge = *swqe->sg_list; ssge.num_sge = swqe->wr.num_sge; sge = &ssge.sge; while (length) { u32 len = rvt_get_sge_length(sge, length); WARN_ON_ONCE(len == 0); rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false); rvt_update_sge(&ssge, len, false); length -= len; } rvt_put_ss(&qp->r_sge); if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) goto bail_unlock; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = IB_WC_RECV; wc.qp = &qp->ibqp; wc.src_qp = sqp->ibqp.qp_num; if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) { if (sqp->ibqp.qp_type == IB_QPT_GSI || sqp->ibqp.qp_type == IB_QPT_SMI) wc.pkey_index = rvt_get_swqe_pkey_index(swqe); else wc.pkey_index = sqp->s_pkey_index; } else { wc.pkey_index = 0; } wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) & ((1 << ppd->lmc) - 1))) & U16_MAX; /* Check for loopback when the port lid is not set */ if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI) wc.slid = be16_to_cpu(IB_LID_PERMISSIVE); wc.sl = rdma_ah_get_sl(ah_attr); wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1); wc.port_num = qp->port_num; /* Signal completion event if the solicited bit is set. */ rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED); ibp->rvp.n_loop_pkts++; bail_unlock: spin_unlock_irqrestore(&qp->r_lock, flags); drop: rcu_read_unlock(); } static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u16 *pkey, u32 extra_bytes, bool bypass) { u32 bth0; struct hfi1_ibport *ibp; ibp = to_iport(qp->ibqp.device, qp->port_num); if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; } else { bth0 = IB_OPCODE_UD_SEND_ONLY << 24; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; bth0 |= extra_bytes << 20; if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) *pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe)); else *pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); if (!bypass) bth0 |= *pkey; ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(rvt_get_swqe_remote_qpn(wqe)); ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn)); /* * Qkeys with the high order bit set mean use the * qkey from the QP context instead of the WR (see 10.2.5). */ ohdr->u.ud.deth[0] = cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? 
qp->qkey : rvt_get_swqe_remote_qkey(wqe)); ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); } void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct rvt_swqe *wqe) { u32 nwords, extra_bytes; u16 len, slid, dlid, pkey; u16 lrh0 = 0; u8 sc5; struct hfi1_qp_priv *priv = qp->priv; struct ib_other_headers *ohdr; struct rdma_ah_attr *ah_attr; struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; struct ib_grh *grh; ibp = to_iport(qp->ibqp.device, qp->port_num); ppd = ppd_from_ibp(ibp); ah_attr = rvt_get_swqe_ah_attr(wqe); extra_bytes = -wqe->length & 3; nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC; /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */ ps->s_txreq->hdr_dwords = 7; if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) ps->s_txreq->hdr_dwords++; if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh; ps->s_txreq->hdr_dwords += hfi1_make_grh(ibp, grh, rdma_ah_read_grh(ah_attr), ps->s_txreq->hdr_dwords - LRH_9B_DWORDS, nwords); lrh0 = HFI1_LRH_GRH; ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth; } else { lrh0 = HFI1_LRH_BTH; ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth; } sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)]; lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4; if (qp->ibqp.qp_type == IB_QPT_SMI) { lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */ priv->s_sc = 0xf; } else { lrh0 |= (sc5 & 0xf) << 12; priv->s_sc = sc5; } dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B); if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) { slid = be16_to_cpu(IB_LID_PERMISSIVE); } else { u16 lid = (u16)ppd->lid; if (lid) { lid |= rdma_ah_get_path_bits(ah_attr) & ((1 << ppd->lmc) - 1); slid = lid; } else { slid = be16_to_cpu(IB_LID_PERMISSIVE); } } hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false); len = ps->s_txreq->hdr_dwords + nwords; /* Setup the packet */ ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B; hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh, lrh0, len, dlid, slid); } void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct rvt_swqe *wqe) { struct hfi1_qp_priv *priv = qp->priv; struct ib_other_headers *ohdr; struct rdma_ah_attr *ah_attr; struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; u32 dlid, slid, nwords, extra_bytes; u32 dest_qp = rvt_get_swqe_remote_qpn(wqe); u32 src_qp = qp->ibqp.qp_num; u16 len, pkey; u8 l4, sc5; bool is_mgmt = false; ibp = to_iport(qp->ibqp.device, qp->port_num); ppd = ppd_from_ibp(ibp); ah_attr = rvt_get_swqe_ah_attr(wqe); /* * Build 16B Management Packet if either the destination * or source queue pair number is 0 or 1. */ if (dest_qp == 0 || src_qp == 0 || dest_qp == 1 || src_qp == 1) { /* header size in dwords 16B LRH+L4_FM = (16+8)/4. */ ps->s_txreq->hdr_dwords = 6; is_mgmt = true; } else { /* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */ ps->s_txreq->hdr_dwords = 9; if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) ps->s_txreq->hdr_dwords++; } /* SW provides space for CRC and LT for bypass packets. */ extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2), wqe->length); nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC; if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) && hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) { struct ib_grh *grh; struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr); /* * Ensure OPA GIDs are transformed to IB gids * before creating the GRH. */ if (grd->sgid_index == OPA_GID_INDEX) { dd_dev_warn(ppd->dd, "Bad sgid_index. 
sgid_index: %d\n", grd->sgid_index); grd->sgid_index = 0; } grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh; ps->s_txreq->hdr_dwords += hfi1_make_grh( ibp, grh, grd, ps->s_txreq->hdr_dwords - LRH_16B_DWORDS, nwords); ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth; l4 = OPA_16B_L4_IB_GLOBAL; } else { ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth; l4 = OPA_16B_L4_IB_LOCAL; } sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)]; if (qp->ibqp.qp_type == IB_QPT_SMI) priv->s_sc = 0xf; else priv->s_sc = sc5; dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B); if (!ppd->lid) slid = be32_to_cpu(OPA_LID_PERMISSIVE); else slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) & ((1 << ppd->lmc) - 1)); if (is_mgmt) { l4 = OPA_16B_L4_FM; pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe)); hfi1_16B_set_qpn(&ps->s_txreq->phdr.hdr.opah.u.mgmt, dest_qp, src_qp); } else { hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true); } /* Convert dwords to flits */ len = (ps->s_txreq->hdr_dwords + nwords) >> 1; /* Setup the packet */ ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B; hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah, slid, dlid, len, pkey, 0, 0, l4, priv->s_sc); } /** * hfi1_make_ud_req - construct a UD request packet * @qp: the QP * @ps: the current packet state * * Assume s_lock is held. * * Return 1 if constructed; otherwise, return 0. */ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; struct rdma_ah_attr *ah_attr; struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; struct rvt_swqe *wqe; int next_cur; u32 lid; ps->s_txreq = get_txreq(ps->dev, qp); if (!ps->s_txreq) goto bail_no_tx; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (iowait_sdma_pending(&priv->s_iowait)) { qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } wqe = rvt_get_swqe_ptr(qp, qp->s_last); rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); goto done_free_tx; } /* see post_one_send() */ if (qp->s_cur == READ_ONCE(qp->s_head)) goto bail; wqe = rvt_get_swqe_ptr(qp, qp->s_cur); next_cur = qp->s_cur + 1; if (next_cur >= qp->s_size) next_cur = 0; /* Construct the header. */ ibp = to_iport(qp->ibqp.device, qp->port_num); ppd = ppd_from_ibp(ibp); ah_attr = rvt_get_swqe_ah_attr(wqe); priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr); if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) || (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) { lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1); if (unlikely(!loopback && ((lid == ppd->lid) || ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) && (qp->ibqp.qp_type == IB_QPT_GSI))))) { unsigned long tflags = ps->flags; /* * If DMAs are in progress, we can't generate * a completion for the loopback packet since * it would be out of order. * Instead of waiting, we could queue a * zero length descriptor so we get a callback. 
*/ if (iowait_sdma_pending(&priv->s_iowait)) { qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } qp->s_cur = next_cur; spin_unlock_irqrestore(&qp->s_lock, tflags); ud_loopback(qp, wqe); spin_lock_irqsave(&qp->s_lock, tflags); ps->flags = tflags; rvt_send_complete(qp, wqe, IB_WC_SUCCESS); goto done_free_tx; } } qp->s_cur = next_cur; ps->s_txreq->s_cur_size = wqe->length; ps->s_txreq->ss = &qp->s_sge; qp->s_srate = rdma_ah_get_static_rate(ah_attr); qp->srate_mbps = ib_rate_to_mbps(qp->s_srate); qp->s_wqe = wqe; qp->s_sge.sge = wqe->sg_list[0]; qp->s_sge.sg_list = wqe->sg_list + 1; qp->s_sge.num_sge = wqe->wr.num_sge; qp->s_sge.total_len = wqe->length; /* Make the appropriate header */ hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); ps->s_txreq->sde = priv->s_sde; priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc); ps->s_txreq->psc = priv->s_sendcontext; /* disarm any ahg */ priv->s_ahg->ahgcount = 0; priv->s_ahg->ahgidx = 0; priv->s_ahg->tx_flags = 0; return 1; done_free_tx: hfi1_put_txreq(ps->s_txreq); ps->s_txreq = NULL; return 1; bail: hfi1_put_txreq(ps->s_txreq); bail_no_tx: ps->s_txreq = NULL; qp->s_flags &= ~RVT_S_BUSY; return 0; } /* * Hardware can't check this so we do it here. * * This is a slightly different algorithm than the standard pkey check. It * special cases the management keys and allows for 0x7fff and 0xffff to be in * the table at the same time. * * @returns the index found or -1 if not found */ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); unsigned i; if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) { unsigned lim_idx = -1; for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) { /* here we look for an exact match */ if (ppd->pkeys[i] == pkey) return i; if (ppd->pkeys[i] == LIM_MGMT_P_KEY) lim_idx = i; } /* did not find 0xffff return 0x7fff idx if found */ if (pkey == FULL_MGMT_P_KEY) return lim_idx; /* no match... */ return -1; } pkey &= 0x7fff; /* remove limited/full membership bit */ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) if ((ppd->pkeys[i] & 0x7fff) == pkey) return i; /* * Should not get here, this means hardware failed to validate pkeys. */ return -1; } void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) { u64 pbc, pbc_flags = 0; u32 bth0, plen, vl, hwords = 7; u16 len; u8 l4; struct hfi1_opa_header hdr; struct ib_other_headers *ohdr; struct pio_buf *pbuf; struct send_context *ctxt = qp_to_send_context(qp, sc5); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 nwords; hdr.hdr_type = HFI1_PKT_TYPE_16B; /* Populate length */ nwords = ((hfi1_get_16b_padding(hwords << 2, 0) + SIZE_OF_LT) >> 2) + SIZE_OF_CRC; if (old_grh) { struct ib_grh *grh = &hdr.opah.u.l.grh; grh->version_tclass_flow = old_grh->version_tclass_flow; grh->paylen = cpu_to_be16( (hwords - LRH_16B_DWORDS + nwords) << 2); grh->hop_limit = 0xff; grh->sgid = old_grh->dgid; grh->dgid = old_grh->sgid; ohdr = &hdr.opah.u.l.oth; l4 = OPA_16B_L4_IB_GLOBAL; hwords += sizeof(struct ib_grh) / sizeof(u32); } else { ohdr = &hdr.opah.u.oth; l4 = OPA_16B_L4_IB_LOCAL; } /* BIT 16 to 19 is TVER. 
Bit 20 to 22 is pad cnt */ bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) | (hfi1_get_16b_padding(hwords << 2, 0) << 20); ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(remote_qpn); ohdr->bth[2] = 0; /* PSN 0 */ /* Convert dwords to flits */ len = (hwords + nwords) >> 1; hfi1_make_16b_hdr(&hdr.opah, slid, dlid, len, pkey, 1, 0, l4, sc5); plen = 2 /* PBC */ + hwords + nwords; pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC; vl = sc_to_vlt(ppd->dd, sc5); pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); if (ctxt) { pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL); if (!IS_ERR_OR_NULL(pbuf)) { trace_pio_output_ibhdr(ppd->dd, &hdr, sc5); ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords); } } } void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) { u64 pbc, pbc_flags = 0; u32 bth0, plen, vl, hwords = 5; u16 lrh0; u8 sl = ibp->sc_to_sl[sc5]; struct hfi1_opa_header hdr; struct ib_other_headers *ohdr; struct pio_buf *pbuf; struct send_context *ctxt = qp_to_send_context(qp, sc5); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); hdr.hdr_type = HFI1_PKT_TYPE_9B; if (old_grh) { struct ib_grh *grh = &hdr.ibh.u.l.grh; grh->version_tclass_flow = old_grh->version_tclass_flow; grh->paylen = cpu_to_be16( (hwords - LRH_9B_DWORDS + SIZE_OF_CRC) << 2); grh->hop_limit = 0xff; grh->sgid = old_grh->dgid; grh->dgid = old_grh->sgid; ohdr = &hdr.ibh.u.l.oth; lrh0 = HFI1_LRH_GRH; hwords += sizeof(struct ib_grh) / sizeof(u32); } else { ohdr = &hdr.ibh.u.oth; lrh0 = HFI1_LRH_BTH; } lrh0 |= (sc5 & 0xf) << 12 | sl << 4; bth0 = pkey | (IB_OPCODE_CNP << 24); ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT)); ohdr->bth[2] = 0; /* PSN 0 */ hfi1_make_ib_hdr(&hdr.ibh, lrh0, hwords + SIZE_OF_CRC, dlid, slid); plen = 2 /* PBC */ + hwords; pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); vl = sc_to_vlt(ppd->dd, sc5); pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); if (ctxt) { pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL); if (!IS_ERR_OR_NULL(pbuf)) { trace_pio_output_ibhdr(ppd->dd, &hdr, sc5); ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords); } } } /* * opa_smp_check() - Do the regular pkey checking, and the additional * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section * 9.10.25 ("SMA Packet Checks"). * * Note that: * - Checks are done using the pkey directly from the packet's BTH, * and specifically _not_ the pkey that we attach to the completion, * which may be different. * - These checks are specifically for "non-local" SMPs (i.e., SMPs * which originated on another node). SMPs which are sent from, and * destined to this node are checked in opa_local_smp_check(). * * At the point where opa_smp_check() is called, we know: * - destination QP is QP0 * * opa_smp_check() returns 0 if all checks succeed, 1 otherwise. */ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5, struct rvt_qp *qp, u16 slid, struct opa_smp *smp) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); /* * I don't think it's possible for us to get here with sc != 0xf, * but check it to be certain. */ if (sc5 != 0xf) return 1; if (rcv_pkey_check(ppd, pkey, sc5, slid)) return 1; /* * At this point we know (and so don't need to check again) that * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY * (see ingress_pkey_check). 
*/ if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE && smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) { ingress_pkey_table_fail(ppd, pkey, slid); return 1; } /* * SMPs fall into one of four (disjoint) categories: * SMA request, SMA response, SMA trap, or SMA trap repress. * Our response depends, in part, on which type of SMP we're * processing. * * If this is an SMA response, skip the check here. * * If this is an SMA request or SMA trap repress: * - pkey != FULL_MGMT_P_KEY => * increment port recv constraint errors, drop MAD * * Otherwise: * - accept if the port is running an SM * - drop MAD if it's an SMA trap * - pkey == FULL_MGMT_P_KEY => * reply with unsupported method * - pkey != FULL_MGMT_P_KEY => * increment port recv constraint errors, drop MAD */ switch (smp->method) { case IB_MGMT_METHOD_GET_RESP: case IB_MGMT_METHOD_REPORT_RESP: break; case IB_MGMT_METHOD_GET: case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_REPORT: case IB_MGMT_METHOD_TRAP_REPRESS: if (pkey != FULL_MGMT_P_KEY) { ingress_pkey_table_fail(ppd, pkey, slid); return 1; } break; default: if (ibp->rvp.port_cap_flags & IB_PORT_SM) return 0; if (smp->method == IB_MGMT_METHOD_TRAP) return 1; if (pkey == FULL_MGMT_P_KEY) { smp->status |= IB_SMP_UNSUP_METHOD; return 0; } ingress_pkey_table_fail(ppd, pkey, slid); return 1; } return 0; } /** * hfi1_ud_rcv - receive an incoming UD packet * @packet: the packet structure * * This is called from qp_rcv() to process an incoming UD packet * for the given QP. * Called at interrupt level. */ void hfi1_ud_rcv(struct hfi1_packet *packet) { u32 hdrsize = packet->hlen; struct ib_wc wc; u32 src_qp; u16 pkey; int mgmt_pkey_idx = -1; struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); void *data = packet->payload; u32 tlen = packet->tlen; struct rvt_qp *qp = packet->qp; u8 sc5 = packet->sc; u8 sl_from_sc; u8 opcode = packet->opcode; u8 sl = packet->sl; u32 dlid = packet->dlid; u32 slid = packet->slid; u8 extra_bytes; u8 l4 = 0; bool dlid_is_permissive; bool slid_is_permissive; bool solicited = false; extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2); if (packet->etype == RHF_RCV_TYPE_BYPASS) { u32 permissive_lid = opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B); l4 = hfi1_16B_get_l4(packet->hdr); pkey = hfi1_16B_get_pkey(packet->hdr); dlid_is_permissive = (dlid == permissive_lid); slid_is_permissive = (slid == permissive_lid); } else { pkey = ib_bth_get_pkey(packet->ohdr); dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE)); slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE)); } sl_from_sc = ibp->sc_to_sl[sc5]; if (likely(l4 != OPA_16B_L4_FM)) { src_qp = ib_get_sqpn(packet->ohdr); solicited = ib_bth_is_solicited(packet->ohdr); } else { src_qp = hfi1_16B_get_src_qpn(packet->mgmt); } process_ecn(qp, packet); /* * Get the number of bytes the message was padded by * and drop incomplete packets. */ if (unlikely(tlen < (hdrsize + extra_bytes))) goto drop; tlen -= hdrsize + extra_bytes; /* * Check that the permissive LID is only used on QP0 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1). */ if (qp->ibqp.qp_num) { if (unlikely(dlid_is_permissive || slid_is_permissive)) goto drop; if (qp->ibqp.qp_num > 1) { if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) { /* * Traps will not be sent for packets dropped * by the HW. 
This is fine, as sending trap * for invalid pkeys is optional according to * IB spec (release 1.3, section 10.9.4) */ hfi1_bad_pkey(ibp, pkey, sl, src_qp, qp->ibqp.qp_num, slid, dlid); return; } } else { /* GSI packet */ mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey); if (mgmt_pkey_idx < 0) goto drop; } if (unlikely(l4 != OPA_16B_L4_FM && ib_get_qkey(packet->ohdr) != qp->qkey)) return; /* Silent drop */ /* Drop invalid MAD packets (see 13.5.3.1). */ if (unlikely(qp->ibqp.qp_num == 1 && (tlen > 2048 || (sc5 == 0xF)))) goto drop; } else { /* Received on QP0, and so by definition, this is an SMP */ struct opa_smp *smp = (struct opa_smp *)data; if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp)) goto drop; if (tlen > 2048) goto drop; if ((dlid_is_permissive || slid_is_permissive) && smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) goto drop; /* look up SMI pkey */ mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey); if (mgmt_pkey_idx < 0) goto drop; } if (qp->ibqp.qp_num > 1 && opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { wc.ex.imm_data = packet->ohdr->u.ud.imm_data; wc.wc_flags = IB_WC_WITH_IMM; } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { wc.ex.imm_data = 0; wc.wc_flags = 0; } else { goto drop; } /* * A GRH is expected to precede the data even if not * present on the wire. */ wc.byte_len = tlen + sizeof(struct ib_grh); /* * Get the next work request entry to find where to put the data. */ if (qp->r_flags & RVT_R_REUSE_SGE) { qp->r_flags &= ~RVT_R_REUSE_SGE; } else { int ret; ret = rvt_get_rwqe(qp, false); if (ret < 0) { rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); return; } if (!ret) { if (qp->ibqp.qp_num == 0) ibp->rvp.n_vl15_dropped++; return; } } /* Silently drop packets which are too big. */ if (unlikely(wc.byte_len > qp->r_len)) { qp->r_flags |= RVT_R_REUSE_SGE; goto drop; } if (packet->grh) { rvt_copy_sge(qp, &qp->r_sge, packet->grh, sizeof(struct ib_grh), true, false); wc.wc_flags |= IB_WC_GRH; } else if (packet->etype == RHF_RCV_TYPE_BYPASS) { struct ib_grh grh; /* * Assuming we only created 16B on the send side * if we want to use large LIDs, since GRH was stripped * out when creating 16B, add back the GRH here. */ hfi1_make_ext_grh(packet, &grh, slid, dlid); rvt_copy_sge(qp, &qp->r_sge, &grh, sizeof(struct ib_grh), true, false); wc.wc_flags |= IB_WC_GRH; } else { rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true); } rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), true, false); rvt_put_ss(&qp->r_sge); if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) return; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = IB_WC_RECV; wc.vendor_err = 0; wc.qp = &qp->ibqp; wc.src_qp = src_qp; if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) { if (mgmt_pkey_idx < 0) { if (net_ratelimit()) { struct hfi1_devdata *dd = ppd->dd; dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n", qp->ibqp.qp_type); mgmt_pkey_idx = 0; } } wc.pkey_index = (unsigned)mgmt_pkey_idx; } else { wc.pkey_index = 0; } if (slid_is_permissive) slid = be32_to_cpu(OPA_LID_PERMISSIVE); wc.slid = slid & U16_MAX; wc.sl = sl_from_sc; /* * Save the LMC lower bits if the destination LID is a unicast LID. */ wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 : dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1); wc.port_num = qp->port_num; /* Signal completion event if the solicited bit is set. */ rvt_recv_cq(qp, &wc, solicited); return; drop: ibp->rvp.n_pkt_drops++; }
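/*
 * Worked example of the UD header sizing in hfi1_make_ud_req_9B() and
 * hfi1_make_ud_req_16B() above. This is an illustrative sketch only; it
 * assumes SIZE_OF_CRC is one dword, as the header-size comments in this
 * file imply.
 *
 * 9B, no GRH, no immediate, wqe->length = 45 bytes:
 *   extra_bytes = -45 & 3 = 3              pad the payload to a dword boundary
 *   nwords      = (45 + 3) / 4 + 1 = 13    payload dwords plus the ICRC
 *   hdr_dwords  = 7                        LRH(8) + BTH(12) + DETH(8) = 28 bytes
 *   so the length handed to hfi1_make_ib_hdr() is 7 + 13 = 20 dwords.
 *
 * The 16B path expresses length in flits (2 dwords per flit), hence the
 * len = (hdr_dwords + nwords) >> 1 conversion, and it also folds SIZE_OF_LT
 * into nwords because software provides space for the LT byte on bypass
 * packets.
 */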
linux-master
drivers/infiniband/hw/hfi1/ud.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2020 Cornelis Networks, Inc. * Copyright(c) 2015-2018 Intel Corporation. */ #include <asm/page.h> #include <linux/string.h> #include "mmu_rb.h" #include "user_exp_rcv.h" #include "trace.h" static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, struct exp_tid_set *set, struct hfi1_filedata *fd); static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages); static int set_rcvarray_entry(struct hfi1_filedata *fd, struct tid_user_buf *tbuf, u32 rcventry, struct tid_group *grp, u16 pageidx, unsigned int npages); static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata, struct tid_rb_node *tnode); static bool tid_rb_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq); static bool tid_cover_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq); static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *, struct tid_group *grp, u16 count, u32 *tidlist, unsigned int *tididx, unsigned int *pmapped); static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo); static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node); static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node); static const struct mmu_interval_notifier_ops tid_mn_ops = { .invalidate = tid_rb_invalidate, }; static const struct mmu_interval_notifier_ops tid_cover_ops = { .invalidate = tid_cover_invalidate, }; /* * Initialize context and file private data needed for Expected * receive caching. This needs to be done after the context has * been configured with the eager/expected RcvEntry counts. */ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd, struct hfi1_ctxtdata *uctxt) { int ret = 0; fd->entry_to_rb = kcalloc(uctxt->expected_count, sizeof(struct rb_node *), GFP_KERNEL); if (!fd->entry_to_rb) return -ENOMEM; if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) { fd->invalid_tid_idx = 0; fd->invalid_tids = kcalloc(uctxt->expected_count, sizeof(*fd->invalid_tids), GFP_KERNEL); if (!fd->invalid_tids) { kfree(fd->entry_to_rb); fd->entry_to_rb = NULL; return -ENOMEM; } fd->use_mn = true; } /* * PSM does not have a good way to separate, count, and * effectively enforce a limit on RcvArray entries used by * subctxts (when context sharing is used) when TID caching * is enabled. To help with that, we calculate a per-process * RcvArray entry share and enforce that. * If TID caching is not in use, PSM deals with usage on its * own. In that case, we allow any subctxt to take all of the * entries. * * Make sure that we set the tid counts only after successful * init. 
*/ spin_lock(&fd->tid_lock); if (uctxt->subctxt_cnt && fd->use_mn) { u16 remainder; fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt; remainder = uctxt->expected_count % uctxt->subctxt_cnt; if (remainder && fd->subctxt < remainder) fd->tid_limit++; } else { fd->tid_limit = uctxt->expected_count; } spin_unlock(&fd->tid_lock); return ret; } void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) { struct hfi1_ctxtdata *uctxt = fd->uctxt; mutex_lock(&uctxt->exp_mutex); if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list)) unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd); if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list)) unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd); mutex_unlock(&uctxt->exp_mutex); kfree(fd->invalid_tids); fd->invalid_tids = NULL; kfree(fd->entry_to_rb); fd->entry_to_rb = NULL; } /* * Release pinned receive buffer pages. * * @mapped: true if the pages have been DMA mapped. false otherwise. * @idx: Index of the first page to unpin. * @npages: No of pages to unpin. * * If the pages have been DMA mapped (indicated by mapped parameter), their * info will be passed via a struct tid_rb_node. If they haven't been mapped, * their info will be passed via a struct tid_user_buf. */ static void unpin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf, struct tid_rb_node *node, unsigned int idx, unsigned int npages, bool mapped) { struct page **pages; struct hfi1_devdata *dd = fd->uctxt->dd; struct mm_struct *mm; if (mapped) { dma_unmap_single(&dd->pcidev->dev, node->dma_addr, node->npages * PAGE_SIZE, DMA_FROM_DEVICE); pages = &node->pages[idx]; mm = mm_from_tid_node(node); } else { pages = &tidbuf->pages[idx]; mm = current->mm; } hfi1_release_user_pages(mm, pages, npages, mapped); fd->tid_n_pinned -= npages; } /* * Pin receive buffer pages. */ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf) { int pinned; unsigned int npages = tidbuf->npages; unsigned long vaddr = tidbuf->vaddr; struct page **pages = NULL; struct hfi1_devdata *dd = fd->uctxt->dd; if (npages > fd->uctxt->expected_count) { dd_dev_err(dd, "Expected buffer too big\n"); return -EINVAL; } /* Allocate the array of struct page pointers needed for pinning */ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); if (!pages) return -ENOMEM; /* * Pin all the pages of the user buffer. If we can't pin all the * pages, accept the amount pinned so far and program only that. * User space knows how to deal with partially programmed buffers. */ if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) { kfree(pages); return -ENOMEM; } pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages); if (pinned <= 0) { kfree(pages); return pinned; } tidbuf->pages = pages; fd->tid_n_pinned += pinned; return pinned; } /* * RcvArray entry allocation for Expected Receives is done by the * following algorithm: * * The context keeps 3 lists of groups of RcvArray entries: * 1. List of empty groups - tid_group_list * This list is created during user context creation and * contains elements which describe sets (of 8) of empty * RcvArray entries. * 2. List of partially used groups - tid_used_list * This list contains sets of RcvArray entries which are * not completely used up. Another mapping request could * use some of all of the remaining entries. * 3. List of full groups - tid_full_list * This is the list where sets that are completely used * up go. 
* * An attempt to optimize the usage of RcvArray entries is * made by finding all sets of physically contiguous pages in a * user's buffer. * These physically contiguous sets are further split into * sizes supported by the receive engine of the HFI. The * resulting sets of pages are stored in struct tid_pageset, * which describes the sets as: * * .count - number of pages in this set * * .idx - starting index into struct page ** array * of this set * * From this point on, the algorithm deals with the page sets * described above. The number of pagesets is divided by the * RcvArray group size to produce the number of full groups * needed. * * Groups from the 3 lists are manipulated using the following * rules: * 1. For each set of 8 pagesets, a complete group from * tid_group_list is taken, programmed, and moved to * the tid_full_list list. * 2. For all remaining pagesets: * 2.1 If the tid_used_list is empty and the tid_group_list * is empty, stop processing pagesets and return only * what has been programmed up to this point. * 2.2 If the tid_used_list is empty and the tid_group_list * is not empty, move a group from tid_group_list to * tid_used_list. * 2.3 For each group in tid_used_list, program as much as * can fit into the group. If the group becomes fully * used, move it to tid_full_list. */ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, struct hfi1_tid_info *tinfo) { int ret = 0, need_group = 0, pinned; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; unsigned int ngroups, pageset_count, tididx = 0, mapped, mapped_pages = 0; u32 *tidlist = NULL; struct tid_user_buf *tidbuf; unsigned long mmu_seq = 0; if (!PAGE_ALIGNED(tinfo->vaddr)) return -EINVAL; if (tinfo->length == 0) return -EINVAL; tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL); if (!tidbuf) return -ENOMEM; mutex_init(&tidbuf->cover_mutex); tidbuf->vaddr = tinfo->vaddr; tidbuf->length = tinfo->length; tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length); tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets), GFP_KERNEL); if (!tidbuf->psets) { ret = -ENOMEM; goto fail_release_mem; } if (fd->use_mn) { ret = mmu_interval_notifier_insert( &tidbuf->notifier, current->mm, tidbuf->vaddr, tidbuf->npages * PAGE_SIZE, &tid_cover_ops); if (ret) goto fail_release_mem; mmu_seq = mmu_interval_read_begin(&tidbuf->notifier); } pinned = pin_rcv_pages(fd, tidbuf); if (pinned <= 0) { ret = (pinned < 0) ? pinned : -ENOSPC; goto fail_unpin; } /* Find sets of physically contiguous pages */ tidbuf->n_psets = find_phys_blocks(tidbuf, pinned); /* Reserve the number of expected tids to be used. */ spin_lock(&fd->tid_lock); if (fd->tid_used + tidbuf->n_psets > fd->tid_limit) pageset_count = fd->tid_limit - fd->tid_used; else pageset_count = tidbuf->n_psets; fd->tid_used += pageset_count; spin_unlock(&fd->tid_lock); if (!pageset_count) { ret = -ENOSPC; goto fail_unreserve; } ngroups = pageset_count / dd->rcv_entries.group_size; tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL); if (!tidlist) { ret = -ENOMEM; goto fail_unreserve; } tididx = 0; /* * From this point on, we are going to be using shared (between master * and subcontexts) context resources. We need to take the lock. */ mutex_lock(&uctxt->exp_mutex); /* * The first step is to program the RcvArray entries which are complete * groups. 
*/ while (ngroups && uctxt->tid_group_list.count) { struct tid_group *grp = tid_group_pop(&uctxt->tid_group_list); ret = program_rcvarray(fd, tidbuf, grp, dd->rcv_entries.group_size, tidlist, &tididx, &mapped); /* * If there was a failure to program the RcvArray * entries for the entire group, reset the grp fields * and add the grp back to the free group list. */ if (ret <= 0) { tid_group_add_tail(grp, &uctxt->tid_group_list); hfi1_cdbg(TID, "Failed to program RcvArray group %d", ret); goto unlock; } tid_group_add_tail(grp, &uctxt->tid_full_list); ngroups--; mapped_pages += mapped; } while (tididx < pageset_count) { struct tid_group *grp, *ptr; /* * If we don't have any partially used tid groups, check * if we have empty groups. If so, take one from there and * put in the partially used list. */ if (!uctxt->tid_used_list.count || need_group) { if (!uctxt->tid_group_list.count) goto unlock; grp = tid_group_pop(&uctxt->tid_group_list); tid_group_add_tail(grp, &uctxt->tid_used_list); need_group = 0; } /* * There is an optimization opportunity here - instead of * fitting as many page sets as we can, check for a group * later on in the list that could fit all of them. */ list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list, list) { unsigned use = min_t(unsigned, pageset_count - tididx, grp->size - grp->used); ret = program_rcvarray(fd, tidbuf, grp, use, tidlist, &tididx, &mapped); if (ret < 0) { hfi1_cdbg(TID, "Failed to program RcvArray entries %d", ret); goto unlock; } else if (ret > 0) { if (grp->used == grp->size) tid_group_move(grp, &uctxt->tid_used_list, &uctxt->tid_full_list); mapped_pages += mapped; need_group = 0; /* Check if we are done so we break out early */ if (tididx >= pageset_count) break; } else if (WARN_ON(ret == 0)) { /* * If ret is 0, we did not program any entries * into this group, which can only happen if * we've screwed up the accounting somewhere. * Warn and try to continue. 
*/ need_group = 1; } } } unlock: mutex_unlock(&uctxt->exp_mutex); hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx, mapped_pages, ret); /* fail if nothing was programmed, set error if none provided */ if (tididx == 0) { if (ret >= 0) ret = -ENOSPC; goto fail_unreserve; } /* adjust reserved tid_used to actual count */ spin_lock(&fd->tid_lock); fd->tid_used -= pageset_count - tididx; spin_unlock(&fd->tid_lock); /* unpin all pages not covered by a TID */ unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages, false); if (fd->use_mn) { /* check for an invalidate during setup */ bool fail = false; mutex_lock(&tidbuf->cover_mutex); fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq); mutex_unlock(&tidbuf->cover_mutex); if (fail) { ret = -EBUSY; goto fail_unprogram; } } tinfo->tidcnt = tididx; tinfo->length = mapped_pages * PAGE_SIZE; if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), tidlist, sizeof(tidlist[0]) * tididx)) { ret = -EFAULT; goto fail_unprogram; } if (fd->use_mn) mmu_interval_notifier_remove(&tidbuf->notifier); kfree(tidbuf->pages); kfree(tidbuf->psets); kfree(tidbuf); kfree(tidlist); return 0; fail_unprogram: /* unprogram, unmap, and unpin all allocated TIDs */ tinfo->tidlist = (unsigned long)tidlist; hfi1_user_exp_rcv_clear(fd, tinfo); tinfo->tidlist = 0; pinned = 0; /* nothing left to unpin */ pageset_count = 0; /* nothing left reserved */ fail_unreserve: spin_lock(&fd->tid_lock); fd->tid_used -= pageset_count; spin_unlock(&fd->tid_lock); fail_unpin: if (fd->use_mn) mmu_interval_notifier_remove(&tidbuf->notifier); if (pinned > 0) unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false); fail_release_mem: kfree(tidbuf->pages); kfree(tidbuf->psets); kfree(tidbuf); kfree(tidlist); return ret; } int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, struct hfi1_tid_info *tinfo) { int ret = 0; struct hfi1_ctxtdata *uctxt = fd->uctxt; u32 *tidinfo; unsigned tididx; if (unlikely(tinfo->tidcnt > fd->tid_used)) return -EINVAL; tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist), sizeof(tidinfo[0]) * tinfo->tidcnt); if (IS_ERR(tidinfo)) return PTR_ERR(tidinfo); mutex_lock(&uctxt->exp_mutex); for (tididx = 0; tididx < tinfo->tidcnt; tididx++) { ret = unprogram_rcvarray(fd, tidinfo[tididx]); if (ret) { hfi1_cdbg(TID, "Failed to unprogram rcv array %d", ret); break; } } spin_lock(&fd->tid_lock); fd->tid_used -= tididx; spin_unlock(&fd->tid_lock); tinfo->tidcnt = tididx; mutex_unlock(&uctxt->exp_mutex); kfree(tidinfo); return ret; } int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd, struct hfi1_tid_info *tinfo) { struct hfi1_ctxtdata *uctxt = fd->uctxt; unsigned long *ev = uctxt->dd->events + (uctxt_offset(uctxt) + fd->subctxt); u32 *array; int ret = 0; /* * copy_to_user() can sleep, which will leave the invalid_lock * locked and cause the MMU notifier to be blocked on the lock * for a long time. * Copy the data to a local buffer so we can release the lock. */ array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL); if (!array) return -EFAULT; spin_lock(&fd->invalid_lock); if (fd->invalid_tid_idx) { memcpy(array, fd->invalid_tids, sizeof(*array) * fd->invalid_tid_idx); memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) * fd->invalid_tid_idx); tinfo->tidcnt = fd->invalid_tid_idx; fd->invalid_tid_idx = 0; /* * Reset the user flag while still holding the lock. * Otherwise, PSM can miss events. 
*/ clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev); } else { tinfo->tidcnt = 0; } spin_unlock(&fd->invalid_lock); if (tinfo->tidcnt) { if (copy_to_user((void __user *)tinfo->tidlist, array, sizeof(*array) * tinfo->tidcnt)) ret = -EFAULT; } kfree(array); return ret; } static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages) { unsigned pagecount, pageidx, setcount = 0, i; unsigned long pfn, this_pfn; struct page **pages = tidbuf->pages; struct tid_pageset *list = tidbuf->psets; if (!npages) return 0; /* * Look for sets of physically contiguous pages in the user buffer. * This will allow us to optimize Expected RcvArray entry usage by * using the bigger supported sizes. */ pfn = page_to_pfn(pages[0]); for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) { this_pfn = i < npages ? page_to_pfn(pages[i]) : 0; /* * If the pfn's are not sequential, pages are not physically * contiguous. */ if (this_pfn != ++pfn) { /* * At this point we have to loop over the set of * physically contiguous pages and break them down into * sizes supported by the HW. * There are two main constraints: * 1. The max buffer size is MAX_EXPECTED_BUFFER. * If the total set size is bigger than that * program only a MAX_EXPECTED_BUFFER chunk. * 2. The buffer size has to be a power of two. If * it is not, round down to the closest power of * 2 and program that size. */ while (pagecount) { int maxpages = pagecount; u32 bufsize = pagecount * PAGE_SIZE; if (bufsize > MAX_EXPECTED_BUFFER) maxpages = MAX_EXPECTED_BUFFER >> PAGE_SHIFT; else if (!is_power_of_2(bufsize)) maxpages = rounddown_pow_of_two(bufsize) >> PAGE_SHIFT; list[setcount].idx = pageidx; list[setcount].count = maxpages; pagecount -= maxpages; pageidx += maxpages; setcount++; } pageidx = i; pagecount = 1; pfn = this_pfn; } else { pagecount++; } } return setcount; } /** * program_rcvarray() - program an RcvArray group with receive buffers * @fd: filedata pointer * @tbuf: pointer to struct tid_user_buf that has the user buffer starting * virtual address, buffer length, page pointers, pagesets (array of * struct tid_pageset holding information on physically contiguous * chunks from the user buffer), and other fields. * @grp: RcvArray group * @count: number of struct tid_pageset's to program * @tidlist: the array of u32 elements where the information about the * programmed RcvArray entries is to be encoded. * @tididx: starting offset into tidlist * @pmapped: (output parameter) number of pages programmed into the RcvArray * entries. * * This function will program up to 'count' number of RcvArray entries from the * group 'grp'. To make best use of write-combining writes, the function will * perform writes to the unused RcvArray entries which will be ignored by the * HW. Each RcvArray entry will be programmed with a physically contiguous * buffer chunk from the user's virtual buffer. * * Return: * -EINVAL if the requested count is larger than the size of the group, * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or * number of RcvArray entries programmed. 
*/ static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf, struct tid_group *grp, u16 count, u32 *tidlist, unsigned int *tididx, unsigned int *pmapped) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; u16 idx; unsigned int start = *tididx; u32 tidinfo = 0, rcventry, useidx = 0; int mapped = 0; /* Count should never be larger than the group size */ if (count > grp->size) return -EINVAL; /* Find the first unused entry in the group */ for (idx = 0; idx < grp->size; idx++) { if (!(grp->map & (1 << idx))) { useidx = idx; break; } rcv_array_wc_fill(dd, grp->base + idx); } idx = 0; while (idx < count) { u16 npages, pageidx, setidx = start + idx; int ret = 0; /* * If this entry in the group is used, move to the next one. * If we go past the end of the group, exit the loop. */ if (useidx >= grp->size) { break; } else if (grp->map & (1 << useidx)) { rcv_array_wc_fill(dd, grp->base + useidx); useidx++; continue; } rcventry = grp->base + useidx; npages = tbuf->psets[setidx].count; pageidx = tbuf->psets[setidx].idx; ret = set_rcvarray_entry(fd, tbuf, rcventry, grp, pageidx, npages); if (ret) return ret; mapped += npages; tidinfo = create_tid(rcventry - uctxt->expected_base, npages); tidlist[(*tididx)++] = tidinfo; grp->used++; grp->map |= 1 << useidx++; idx++; } /* Fill the rest of the group with "blank" writes */ for (; useidx < grp->size; useidx++) rcv_array_wc_fill(dd, grp->base + useidx); *pmapped = mapped; return idx; } static int set_rcvarray_entry(struct hfi1_filedata *fd, struct tid_user_buf *tbuf, u32 rcventry, struct tid_group *grp, u16 pageidx, unsigned int npages) { int ret; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct tid_rb_node *node; struct hfi1_devdata *dd = uctxt->dd; dma_addr_t phys; struct page **pages = tbuf->pages + pageidx; /* * Allocate the node first so we can handle a potential * failure before we've programmed anything. 
*/ node = kzalloc(struct_size(node, pages, npages), GFP_KERNEL); if (!node) return -ENOMEM; phys = dma_map_single(&dd->pcidev->dev, __va(page_to_phys(pages[0])), npages * PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&dd->pcidev->dev, phys)) { dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n", phys); kfree(node); return -EFAULT; } node->fdata = fd; mutex_init(&node->invalidate_mutex); node->phys = page_to_phys(pages[0]); node->npages = npages; node->rcventry = rcventry; node->dma_addr = phys; node->grp = grp; node->freed = false; memcpy(node->pages, pages, flex_array_size(node, pages, npages)); if (fd->use_mn) { ret = mmu_interval_notifier_insert( &node->notifier, current->mm, tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE, &tid_mn_ops); if (ret) goto out_unmap; } fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node; hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1); trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages, node->notifier.interval_tree.start, node->phys, phys); return 0; out_unmap: hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d", node->rcventry, node->notifier.interval_tree.start, node->phys, ret); dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE, DMA_FROM_DEVICE); kfree(node); return -EFAULT; } static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; struct tid_rb_node *node; u32 tidctrl = EXP_TID_GET(tidinfo, CTRL); u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry; if (tidctrl == 0x3 || tidctrl == 0x0) return -EINVAL; rcventry = tididx + (tidctrl - 1); if (rcventry >= uctxt->expected_count) { dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n", rcventry, uctxt->ctxt); return -EINVAL; } node = fd->entry_to_rb[rcventry]; if (!node || node->rcventry != (uctxt->expected_base + rcventry)) return -EBADF; if (fd->use_mn) mmu_interval_notifier_remove(&node->notifier); cacheless_tid_rb_remove(fd, node); return 0; } static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; mutex_lock(&node->invalidate_mutex); if (node->freed) goto done; node->freed = true; trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry, node->npages, node->notifier.interval_tree.start, node->phys, node->dma_addr); /* Make sure device has seen the write before pages are unpinned */ hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0); unpin_rcv_pages(fd, NULL, node, 0, node->npages, true); done: mutex_unlock(&node->invalidate_mutex); } static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) { struct hfi1_ctxtdata *uctxt = fd->uctxt; __clear_tid_node(fd, node); node->grp->used--; node->grp->map &= ~(1 << (node->rcventry - node->grp->base)); if (node->grp->used == node->grp->size - 1) tid_group_move(node->grp, &uctxt->tid_full_list, &uctxt->tid_used_list); else if (!node->grp->used) tid_group_move(node->grp, &uctxt->tid_used_list, &uctxt->tid_group_list); kfree(node); } /* * As a simple helper for hfi1_user_exp_rcv_free, this function deals with * clearing nodes in the non-cached case. 
*/ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, struct exp_tid_set *set, struct hfi1_filedata *fd) { struct tid_group *grp, *ptr; int i; list_for_each_entry_safe(grp, ptr, &set->list, list) { list_del_init(&grp->list); for (i = 0; i < grp->size; i++) { if (grp->map & (1 << i)) { u16 rcventry = grp->base + i; struct tid_rb_node *node; node = fd->entry_to_rb[rcventry - uctxt->expected_base]; if (!node || node->rcventry != rcventry) continue; if (fd->use_mn) mmu_interval_notifier_remove( &node->notifier); cacheless_tid_rb_remove(fd, node); } } } } static bool tid_rb_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { struct tid_rb_node *node = container_of(mni, struct tid_rb_node, notifier); struct hfi1_filedata *fdata = node->fdata; struct hfi1_ctxtdata *uctxt = fdata->uctxt; if (node->freed) return true; /* take action only if unmapping */ if (range->event != MMU_NOTIFY_UNMAP) return true; trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->notifier.interval_tree.start, node->rcventry, node->npages, node->dma_addr); /* clear the hardware rcvarray entry */ __clear_tid_node(fdata, node); spin_lock(&fdata->invalid_lock); if (fdata->invalid_tid_idx < uctxt->expected_count) { fdata->invalid_tids[fdata->invalid_tid_idx] = create_tid(node->rcventry - uctxt->expected_base, node->npages); if (!fdata->invalid_tid_idx) { unsigned long *ev; /* * hfi1_set_uevent_bits() sets a user event flag * for all processes. Because calling into the * driver to process TID cache invalidations is * expensive and TID cache invalidations are * handled on a per-process basis, we can * optimize this to set the flag only for the * process in question. */ ev = uctxt->dd->events + (uctxt_offset(uctxt) + fdata->subctxt); set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev); } fdata->invalid_tid_idx++; } spin_unlock(&fdata->invalid_lock); return true; } static bool tid_cover_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { struct tid_user_buf *tidbuf = container_of(mni, struct tid_user_buf, notifier); /* take action only if unmapping */ if (range->event == MMU_NOTIFY_UNMAP) { mutex_lock(&tidbuf->cover_mutex); mmu_interval_set_seq(mni, cur_seq); mutex_unlock(&tidbuf->cover_mutex); } return true; } static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata, struct tid_rb_node *tnode) { u32 base = fdata->uctxt->expected_base; fdata->entry_to_rb[tnode->rcventry - base] = NULL; clear_tid_node(fdata, tnode); }
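/*
 * Worked example of the pageset and TID encoding used by find_phys_blocks()
 * and program_rcvarray()/unprogram_rcvarray() above. This is an illustrative
 * sketch that assumes 4 KiB pages and a run small enough to fit under
 * MAX_EXPECTED_BUFFER.
 *
 * find_phys_blocks() on a physically contiguous run of 7 pages (28 KiB):
 *   28 KiB is not a power of two -> rounddown_pow_of_two() gives 16 KiB (4 pages)
 *   remaining 12 KiB             -> 8 KiB (2 pages)
 *   remaining  4 KiB             -> 4 KiB (1 page)
 * so the run becomes three pagesets {4, 2, 1} and consumes three RcvArray
 * entries instead of seven.
 *
 * Each programmed entry is reported to user space as a u32 built by
 * create_tid(rcventry - expected_base, npages). unprogram_rcvarray() inverts
 * it with EXP_TID_GET(tidinfo, CTRL) and EXP_TID_GET(tidinfo, IDX) << 1,
 * where CTRL of 1 or 2 selects the entry within the pair and the values
 * 0 and 0x3 are rejected as invalid.
 */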
linux-master
drivers/infiniband/hw/hfi1/user_exp_rcv.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. */ #include <rdma/ib_mad.h> #include <rdma/ib_user_verbs.h> #include <linux/io.h> #include <linux/module.h> #include <linux/utsname.h> #include <linux/rculist.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <rdma/opa_addr.h> #include <linux/nospec.h> #include "hfi.h" #include "common.h" #include "device.h" #include "trace.h" #include "qp.h" #include "verbs_txreq.h" #include "debugfs.h" #include "vnic.h" #include "fault.h" #include "affinity.h" #include "ipoib.h" static unsigned int hfi1_lkey_table_size = 16; module_param_named(lkey_table_size, hfi1_lkey_table_size, uint, S_IRUGO); MODULE_PARM_DESC(lkey_table_size, "LKEY table size in bits (2^n, 1 <= n <= 23)"); static unsigned int hfi1_max_pds = 0xFFFF; module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO); MODULE_PARM_DESC(max_pds, "Maximum number of protection domains to support"); static unsigned int hfi1_max_ahs = 0xFFFF; module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO); MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); unsigned int hfi1_max_cqes = 0x2FFFFF; module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO); MODULE_PARM_DESC(max_cqes, "Maximum number of completion queue entries to support"); unsigned int hfi1_max_cqs = 0x1FFFF; module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO); MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); unsigned int hfi1_max_qp_wrs = 0x3FFF; module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO); MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); unsigned int hfi1_max_qps = 32768; module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO); MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); unsigned int hfi1_max_sges = 0x60; module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO); MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); unsigned int hfi1_max_mcast_grps = 16384; module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO); MODULE_PARM_DESC(max_mcast_grps, "Maximum number of multicast groups to support"); unsigned int hfi1_max_mcast_qp_attached = 16; module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached, uint, S_IRUGO); MODULE_PARM_DESC(max_mcast_qp_attached, "Maximum number of attached QPs to support"); unsigned int hfi1_max_srqs = 1024; module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO); MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); unsigned int hfi1_max_srq_sges = 128; module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO); MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); unsigned int hfi1_max_srq_wrs = 0x1FFFF; module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO); MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); unsigned short piothreshold = 256; module_param(piothreshold, ushort, S_IRUGO); MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. 
pio"); static unsigned int sge_copy_mode; module_param(sge_copy_mode, uint, S_IRUGO); MODULE_PARM_DESC(sge_copy_mode, "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS"); static void verbs_sdma_complete( struct sdma_txreq *cookie, int status); static int pio_wait(struct rvt_qp *qp, struct send_context *sc, struct hfi1_pkt_state *ps, u32 flag); /* Length of buffer to create verbs txreq cache name */ #define TXREQ_NAME_LEN 24 static uint wss_threshold = 80; module_param(wss_threshold, uint, S_IRUGO); MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy"); static uint wss_clean_period = 256; module_param(wss_clean_period, uint, S_IRUGO); MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned"); /* * Translate ib_wr_opcode into ib_wc_opcode. */ const enum ib_wc_opcode ib_hfi1_wc_opcode[] = { [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, [IB_WR_TID_RDMA_WRITE] = IB_WC_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, [IB_WR_SEND] = IB_WC_SEND, [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, [IB_WR_TID_RDMA_READ] = IB_WC_RDMA_READ, [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD, [IB_WR_SEND_WITH_INV] = IB_WC_SEND, [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV, [IB_WR_REG_MR] = IB_WC_REG_MR }; /* * Length of header by opcode, 0 --> not supported */ const u8 hdr_len_by_opcode[256] = { /* RC */ [IB_OPCODE_RC_SEND_FIRST] = 12 + 8, [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8, [IB_OPCODE_RC_SEND_LAST] = 12 + 8, [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, [IB_OPCODE_RC_SEND_ONLY] = 12 + 8, [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4, [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16, [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8, [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8, [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16, [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20, [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16, [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4, [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8, [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4, [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4, [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4, [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4 + 8, [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28, [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28, [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4, [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4, [IB_OPCODE_TID_RDMA_READ_REQ] = 12 + 8 + 36, [IB_OPCODE_TID_RDMA_READ_RESP] = 12 + 8 + 36, [IB_OPCODE_TID_RDMA_WRITE_REQ] = 12 + 8 + 36, [IB_OPCODE_TID_RDMA_WRITE_RESP] = 12 + 8 + 36, [IB_OPCODE_TID_RDMA_WRITE_DATA] = 12 + 8 + 36, [IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = 12 + 8 + 36, [IB_OPCODE_TID_RDMA_ACK] = 12 + 8 + 36, [IB_OPCODE_TID_RDMA_RESYNC] = 12 + 8 + 36, /* UC */ [IB_OPCODE_UC_SEND_FIRST] = 12 + 8, [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8, [IB_OPCODE_UC_SEND_LAST] = 12 + 8, [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, [IB_OPCODE_UC_SEND_ONLY] = 12 + 8, [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4, [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16, [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8, [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8, [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4, [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16, [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20, /* UD */ [IB_OPCODE_UD_SEND_ONLY] = 12 
+ 8 + 8, [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12 }; static const opcode_handler opcode_handler_tbl[256] = { /* RC */ [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv, [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv, [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv, [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv, [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv, [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv, [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv, [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv, [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv, [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv, [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv, [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv, [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv, /* TID RDMA has separate handlers for different opcodes.*/ [IB_OPCODE_TID_RDMA_WRITE_REQ] = &hfi1_rc_rcv_tid_rdma_write_req, [IB_OPCODE_TID_RDMA_WRITE_RESP] = &hfi1_rc_rcv_tid_rdma_write_resp, [IB_OPCODE_TID_RDMA_WRITE_DATA] = &hfi1_rc_rcv_tid_rdma_write_data, [IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = &hfi1_rc_rcv_tid_rdma_write_data, [IB_OPCODE_TID_RDMA_READ_REQ] = &hfi1_rc_rcv_tid_rdma_read_req, [IB_OPCODE_TID_RDMA_READ_RESP] = &hfi1_rc_rcv_tid_rdma_read_resp, [IB_OPCODE_TID_RDMA_RESYNC] = &hfi1_rc_rcv_tid_rdma_resync, [IB_OPCODE_TID_RDMA_ACK] = &hfi1_rc_rcv_tid_rdma_ack, /* UC */ [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv, [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv, [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv, [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv, [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv, [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv, [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv, [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv, [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv, [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv, [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv, [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv, /* UD */ [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv, [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv, /* CNP */ [IB_OPCODE_CNP] = &hfi1_cnp_rcv }; #define OPMASK 0x1f static const u32 pio_opmask[BIT(3)] = { /* RC */ [IB_OPCODE_RC >> 5] = BIT(RC_OP(SEND_ONLY) & OPMASK) | BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) | BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) | BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) | BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) | BIT(RC_OP(ACKNOWLEDGE) & OPMASK) | BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) | BIT(RC_OP(COMPARE_SWAP) & OPMASK) | BIT(RC_OP(FETCH_ADD) & OPMASK), /* UC */ [IB_OPCODE_UC >> 5] = BIT(UC_OP(SEND_ONLY) & OPMASK) | BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) | BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) | BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK), }; /* * System image GUID. */ __be64 ib_hfi1_sys_image_guid; /* * Make sure the QP is ready and able to accept the given opcode. 
*/ static inline opcode_handler qp_ok(struct hfi1_packet *packet) { if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) return NULL; if (((packet->opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) || (packet->opcode == IB_OPCODE_CNP)) return opcode_handler_tbl[packet->opcode]; return NULL; } static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc) { #ifdef CONFIG_FAULT_INJECTION if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP) { /* * In order to drop non-IB traffic we * set PbcInsertHrc to NONE (0x2). * The packet will still be delivered * to the receiving node but a * KHdrHCRCErr (KDETH packet with a bad * HCRC) will be triggered and the * packet will not be delivered to the * correct context. */ pbc &= ~PBC_INSERT_HCRC_SMASK; pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT; } else { /* * In order to drop regular verbs * traffic we set the PbcTestEbp * flag. The packet will still be * delivered to the receiving node but * a 'late ebp error' will be * triggered and will be dropped. */ pbc |= PBC_TEST_EBP; } #endif return pbc; } static opcode_handler tid_qp_ok(int opcode, struct hfi1_packet *packet) { if (packet->qp->ibqp.qp_type != IB_QPT_RC || !(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) return NULL; if ((opcode & RVT_OPCODE_QP_MASK) == IB_OPCODE_TID_RDMA) return opcode_handler_tbl[opcode]; return NULL; } void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; struct ib_header *hdr = packet->hdr; u32 tlen = packet->tlen; struct hfi1_pportdata *ppd = rcd->ppd; struct hfi1_ibport *ibp = &ppd->ibport_data; struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; opcode_handler opcode_handler; unsigned long flags; u32 qp_num; int lnh; u8 opcode; /* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */ if (unlikely(tlen < 15 * sizeof(u32))) goto drop; lnh = be16_to_cpu(hdr->lrh[0]) & 3; if (lnh != HFI1_LRH_BTH) goto drop; packet->ohdr = &hdr->u.oth; trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24); inc_opstats(tlen, &rcd->opstats->stats[opcode]); /* verbs_qp can be picked up from any tid_rdma header struct */ qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_req.verbs_qp) & RVT_QPN_MASK; rcu_read_lock(); packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); if (!packet->qp) goto drop_rcu; spin_lock_irqsave(&packet->qp->r_lock, flags); opcode_handler = tid_qp_ok(opcode, packet); if (likely(opcode_handler)) opcode_handler(packet); else goto drop_unlock; spin_unlock_irqrestore(&packet->qp->r_lock, flags); rcu_read_unlock(); return; drop_unlock: spin_unlock_irqrestore(&packet->qp->r_lock, flags); drop_rcu: rcu_read_unlock(); drop: ibp->rvp.n_pkt_drops++; } void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; struct ib_header *hdr = packet->hdr; u32 tlen = packet->tlen; struct hfi1_pportdata *ppd = rcd->ppd; struct hfi1_ibport *ibp = &ppd->ibport_data; struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; opcode_handler opcode_handler; unsigned long flags; u32 qp_num; int lnh; u8 opcode; /* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */ if (unlikely(tlen < 15 * sizeof(u32))) goto drop; lnh = be16_to_cpu(hdr->lrh[0]) & 3; if (lnh != HFI1_LRH_BTH) goto drop; packet->ohdr = &hdr->u.oth; trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24); inc_opstats(tlen, &rcd->opstats->stats[opcode]); /* verbs_qp can be picked up from any tid_rdma header struct */ 
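/* * Expected (TID RDMA response) packets carry the verbs QP number in the * KDETH/TID RDMA header rather than in the BTH destination QP field, so * it is read from r_rsp.verbs_qp here, mirroring the r_req lookup in * hfi1_kdeth_eager_rcv() above. */ 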
qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_rsp.verbs_qp) & RVT_QPN_MASK; rcu_read_lock(); packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); if (!packet->qp) goto drop_rcu; spin_lock_irqsave(&packet->qp->r_lock, flags); opcode_handler = tid_qp_ok(opcode, packet); if (likely(opcode_handler)) opcode_handler(packet); else goto drop_unlock; spin_unlock_irqrestore(&packet->qp->r_lock, flags); rcu_read_unlock(); return; drop_unlock: spin_unlock_irqrestore(&packet->qp->r_lock, flags); drop_rcu: rcu_read_unlock(); drop: ibp->rvp.n_pkt_drops++; } static int hfi1_do_pkey_check(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; struct hfi1_pportdata *ppd = rcd->ppd; struct hfi1_16b_header *hdr = packet->hdr; u16 pkey; /* Pkey check needed only for bypass packets */ if (packet->etype != RHF_RCV_TYPE_BYPASS) return 0; /* Perform pkey check */ pkey = hfi1_16B_get_pkey(hdr); return ingress_pkey_check(ppd, pkey, packet->sc, packet->qp->s_pkey_index, packet->slid, true); } static inline void hfi1_handle_packet(struct hfi1_packet *packet, bool is_mcast) { u32 qp_num; struct hfi1_ctxtdata *rcd = packet->rcd; struct hfi1_pportdata *ppd = rcd->ppd; struct hfi1_ibport *ibp = rcd_to_iport(rcd); struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; opcode_handler packet_handler; unsigned long flags; inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]); if (unlikely(is_mcast)) { struct rvt_mcast *mcast; struct rvt_mcast_qp *p; if (!packet->grh) goto drop; mcast = rvt_mcast_find(&ibp->rvp, &packet->grh->dgid, opa_get_lid(packet->dlid, 9B)); if (!mcast) goto drop; rcu_read_lock(); list_for_each_entry_rcu(p, &mcast->qp_list, list) { packet->qp = p->qp; if (hfi1_do_pkey_check(packet)) goto unlock_drop; spin_lock_irqsave(&packet->qp->r_lock, flags); packet_handler = qp_ok(packet); if (likely(packet_handler)) packet_handler(packet); else ibp->rvp.n_pkt_drops++; spin_unlock_irqrestore(&packet->qp->r_lock, flags); } rcu_read_unlock(); /* * Notify rvt_multicast_detach() if it is waiting for us * to finish. */ if (atomic_dec_return(&mcast->refcount) <= 1) wake_up(&mcast->wait); } else { /* Get the destination QP number. */ if (packet->etype == RHF_RCV_TYPE_BYPASS && hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM) qp_num = hfi1_16B_get_dest_qpn(packet->mgmt); else qp_num = ib_bth_get_qpn(packet->ohdr); rcu_read_lock(); packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); if (!packet->qp) goto unlock_drop; if (hfi1_do_pkey_check(packet)) goto unlock_drop; spin_lock_irqsave(&packet->qp->r_lock, flags); packet_handler = qp_ok(packet); if (likely(packet_handler)) packet_handler(packet); else ibp->rvp.n_pkt_drops++; spin_unlock_irqrestore(&packet->qp->r_lock, flags); rcu_read_unlock(); } return; unlock_drop: rcu_read_unlock(); drop: ibp->rvp.n_pkt_drops++; } /** * hfi1_ib_rcv - process an incoming packet * @packet: data packet information * * This is called to process an incoming packet at interrupt level. */ void hfi1_ib_rcv(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid)); } void hfi1_16B_rcv(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; trace_input_ibhdr(rcd->dd, packet, false); hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid)); } /* * This is called from a timer to check for QPs * which need kernel memory in order to send a packet. 
*/ static void mem_timer(struct timer_list *t) { struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer); struct list_head *list = &dev->memwait; struct rvt_qp *qp = NULL; struct iowait *wait; unsigned long flags; struct hfi1_qp_priv *priv; write_seqlock_irqsave(&dev->iowait_lock, flags); if (!list_empty(list)) { wait = list_first_entry(list, struct iowait, list); qp = iowait_to_qp(wait); priv = qp->priv; list_del_init(&priv->s_iowait.list); priv->s_iowait.lock = NULL; /* refcount held until actual wake up */ if (!list_empty(list)) mod_timer(&dev->mem_timer, jiffies + 1); } write_sequnlock_irqrestore(&dev->iowait_lock, flags); if (qp) hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM); } /* * This is called with progress side lock held. */ /* New API */ static void verbs_sdma_complete( struct sdma_txreq *cookie, int status) { struct verbs_txreq *tx = container_of(cookie, struct verbs_txreq, txreq); struct rvt_qp *qp = tx->qp; spin_lock(&qp->s_lock); if (tx->wqe) { rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS); } else if (qp->ibqp.qp_type == IB_QPT_RC) { struct hfi1_opa_header *hdr; hdr = &tx->phdr.hdr; if (unlikely(status == SDMA_TXREQ_S_ABORTED)) hfi1_rc_verbs_aborted(qp, hdr); hfi1_rc_send_complete(qp, hdr); } spin_unlock(&qp->s_lock); hfi1_put_txreq(tx); } void hfi1_wait_kmem(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct ib_qp *ibqp = &qp->ibqp; struct ib_device *ibdev = ibqp->device; struct hfi1_ibdev *dev = to_idev(ibdev); if (list_empty(&priv->s_iowait.list)) { if (list_empty(&dev->memwait)) mod_timer(&dev->mem_timer, jiffies + 1); qp->s_flags |= RVT_S_WAIT_KMEM; list_add_tail(&priv->s_iowait.list, &dev->memwait); priv->s_iowait.lock = &dev->iowait_lock; trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM); rvt_get_qp(qp); } } static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp, struct hfi1_pkt_state *ps) { unsigned long flags; int ret = 0; spin_lock_irqsave(&qp->s_lock, flags); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { write_seqlock(&dev->iowait_lock); list_add_tail(&ps->s_txreq->txreq.list, &ps->wait->tx_head); hfi1_wait_kmem(qp); write_sequnlock(&dev->iowait_lock); hfi1_qp_unbusy(qp, ps->wait); ret = -EBUSY; } spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } /* * This routine calls txadds for each sg entry. * * Add failures will revert the sge cursor */ static noinline int build_verbs_ulp_payload( struct sdma_engine *sde, u32 length, struct verbs_txreq *tx) { struct rvt_sge_state *ss = tx->ss; struct rvt_sge *sg_list = ss->sg_list; struct rvt_sge sge = ss->sge; u8 num_sge = ss->num_sge; u32 len; int ret = 0; while (length) { len = rvt_get_sge_length(&ss->sge, length); WARN_ON_ONCE(len == 0); ret = sdma_txadd_kvaddr( sde->dd, &tx->txreq, ss->sge.vaddr, len); if (ret) goto bail_txadd; rvt_update_sge(ss, len, false); length -= len; } return ret; bail_txadd: /* unwind cursor */ ss->sge = sge; ss->num_sge = num_sge; ss->sg_list = sg_list; return ret; } /** * update_tx_opstats - record stats by opcode * @qp: the qp * @ps: transmit packet state * @plen: the plen in dwords * * This is a routine to record the tx opstats after a * packet has been presented to the egress mechanism. */ static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u32 plen) { #ifdef CONFIG_DEBUG_FS struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats); inc_opstats(plen * 4, &s->stats[ps->opcode]); put_cpu_ptr(s); #endif } /* * Build the number of DMA descriptors needed to send length bytes of data. 
* * NOTE: DMA mapping is held in the tx until completed in the ring or * the tx desc is freed without having been submitted to the ring * * This routine ensures all the helper routine calls succeed. */ /* New API */ static int build_verbs_tx_desc( struct sdma_engine *sde, u32 length, struct verbs_txreq *tx, struct hfi1_ahg_info *ahg_info, u64 pbc) { int ret = 0; struct hfi1_sdma_header *phdr = &tx->phdr; u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2; u8 extra_bytes = 0; if (tx->phdr.hdr.hdr_type) { /* * hdrbytes accounts for PBC. Need to subtract 8 bytes * before calculating padding. */ extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) + (SIZE_OF_CRC << 2) + SIZE_OF_LT; } if (!ahg_info->ahgcount) { ret = sdma_txinit_ahg( &tx->txreq, ahg_info->tx_flags, hdrbytes + length + extra_bytes, ahg_info->ahgidx, 0, NULL, 0, verbs_sdma_complete); if (ret) goto bail_txadd; phdr->pbc = cpu_to_le64(pbc); ret = sdma_txadd_kvaddr( sde->dd, &tx->txreq, phdr, hdrbytes); if (ret) goto bail_txadd; } else { ret = sdma_txinit_ahg( &tx->txreq, ahg_info->tx_flags, length, ahg_info->ahgidx, ahg_info->ahgcount, ahg_info->ahgdesc, hdrbytes, verbs_sdma_complete); if (ret) goto bail_txadd; } /* add the ulp payload - if any. tx->ss can be NULL for acks */ if (tx->ss) { ret = build_verbs_ulp_payload(sde, length, tx); if (ret) goto bail_txadd; } /* add icrc, lt byte, and padding to flit */ if (extra_bytes) ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys, extra_bytes); bail_txadd: return ret; } static u64 update_hcrc(u8 opcode, u64 pbc) { if ((opcode & IB_OPCODE_TID_RDMA) == IB_OPCODE_TID_RDMA) { pbc &= ~PBC_INSERT_HCRC_SMASK; pbc |= (u64)PBC_IHCRC_LKDETH << PBC_INSERT_HCRC_SHIFT; } return pbc; } int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ahg_info *ahg_info = priv->s_ahg; u32 hdrwords = ps->s_txreq->hdr_dwords; u32 len = ps->s_txreq->s_cur_size; u32 plen; struct hfi1_ibdev *dev = ps->dev; struct hfi1_pportdata *ppd = ps->ppd; struct verbs_txreq *tx; u8 sc5 = priv->s_sc; int ret; u32 dwords; if (ps->s_txreq->phdr.hdr.hdr_type) { u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len); dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) + SIZE_OF_LT) >> 2; } else { dwords = (len + 3) >> 2; } plen = hdrwords + dwords + sizeof(pbc) / 4; tx = ps->s_txreq; if (!sdma_txreq_built(&tx->txreq)) { if (likely(pbc == 0)) { u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); /* No vl15 here */ /* set PBC_DC_INFO bit (aka SC[4]) in pbc */ if (ps->s_txreq->phdr.hdr.hdr_type) pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC; else pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) pbc = hfi1_fault_tx(qp, ps->opcode, pbc); else /* Update HCRC based on packet opcode */ pbc = update_hcrc(ps->opcode, pbc); } tx->wqe = qp->s_wqe; ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc); if (unlikely(ret)) goto bail_build; } ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent); if (unlikely(ret < 0)) { if (ret == -ECOMM) goto bail_ecomm; return ret; } update_tx_opstats(qp, ps, plen); trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5)); return ret; bail_ecomm: /* The current one got "sent" */ return 0; bail_build: ret = wait_kmem(dev, qp, ps); if (!ret) { /* free txreq - bad state */ hfi1_put_txreq(ps->s_txreq); ps->s_txreq = NULL; } return ret; } /* * If 
we are now in the error state, return zero to flush the * send work request. */ static int pio_wait(struct rvt_qp *qp, struct send_context *sc, struct hfi1_pkt_state *ps, u32 flag) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_devdata *dd = sc->dd; unsigned long flags; int ret = 0; /* * Note that as soon as want_buffer() is called and * possibly before it returns, sc_piobufavail() * could be called. Therefore, put QP on the I/O wait list before * enabling the PIO avail interrupt. */ spin_lock_irqsave(&qp->s_lock, flags); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { write_seqlock(&sc->waitlock); list_add_tail(&ps->s_txreq->txreq.list, &ps->wait->tx_head); if (list_empty(&priv->s_iowait.list)) { struct hfi1_ibdev *dev = &dd->verbs_dev; int was_empty; dev->n_piowait += !!(flag & RVT_S_WAIT_PIO); dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN); qp->s_flags |= flag; was_empty = list_empty(&sc->piowait); iowait_get_priority(&priv->s_iowait); iowait_queue(ps->pkts_sent, &priv->s_iowait, &sc->piowait); priv->s_iowait.lock = &sc->waitlock; trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO); rvt_get_qp(qp); /* counting: only call wantpiobuf_intr if first user */ if (was_empty) hfi1_sc_wantpiobuf_intr(sc, 1); } write_sequnlock(&sc->waitlock); hfi1_qp_unbusy(qp, ps->wait); ret = -EBUSY; } spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } static void verbs_pio_complete(void *arg, int code) { struct rvt_qp *qp = (struct rvt_qp *)arg; struct hfi1_qp_priv *priv = qp->priv; if (iowait_pio_dec(&priv->s_iowait)) iowait_drain_wakeup(&priv->s_iowait); } int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { struct hfi1_qp_priv *priv = qp->priv; u32 hdrwords = ps->s_txreq->hdr_dwords; struct rvt_sge_state *ss = ps->s_txreq->ss; u32 len = ps->s_txreq->s_cur_size; u32 dwords; u32 plen; struct hfi1_pportdata *ppd = ps->ppd; u32 *hdr; u8 sc5; unsigned long flags = 0; struct send_context *sc; struct pio_buf *pbuf; int wc_status = IB_WC_SUCCESS; int ret = 0; pio_release_cb cb = NULL; u8 extra_bytes = 0; if (ps->s_txreq->phdr.hdr.hdr_type) { u8 pad_size = hfi1_get_16b_padding((hdrwords << 2), len); extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT; dwords = (len + extra_bytes) >> 2; hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah; } else { dwords = (len + 3) >> 2; hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh; } plen = hdrwords + dwords + sizeof(pbc) / 4; /* only RC/UC use complete */ switch (qp->ibqp.qp_type) { case IB_QPT_RC: case IB_QPT_UC: cb = verbs_pio_complete; break; default: break; } /* vl15 special case taken care of in ud.c */ sc5 = priv->s_sc; sc = ps->s_txreq->psc; if (likely(pbc == 0)) { u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); /* set PBC_DC_INFO bit (aka SC[4]) in pbc */ if (ps->s_txreq->phdr.hdr.hdr_type) pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC; else pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) pbc = hfi1_fault_tx(qp, ps->opcode, pbc); else /* Update HCRC based on packet opcode */ pbc = update_hcrc(ps->opcode, pbc); } if (cb) iowait_pio_inc(&priv->s_iowait); pbuf = sc_buffer_alloc(sc, plen, cb, qp); if (IS_ERR_OR_NULL(pbuf)) { if (cb) verbs_pio_complete(qp, 0); if (IS_ERR(pbuf)) { /* * If we have filled the PIO buffers to capacity and are * not in an active state this request is not going to * go out to so just complete it with an error or else a * ULP or the core may be stuck waiting. */ hfi1_cdbg( PIO, "alloc failed. 
state not active, completing"); wc_status = IB_WC_GENERAL_ERR; goto pio_bail; } else { /* * This is a normal occurrence. The PIO buffs are full * up but we are still happily sending, well we could be * so lets continue to queue the request. */ hfi1_cdbg(PIO, "alloc failed. state active, queuing"); ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO); if (!ret) /* txreq not queued - free */ goto bail; /* tx consumed in wait */ return ret; } } if (dwords == 0) { pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords); } else { seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4); if (ss) { while (len) { void *addr = ss->sge.vaddr; u32 slen = rvt_get_sge_length(&ss->sge, len); rvt_update_sge(ss, slen, false); seg_pio_copy_mid(pbuf, addr, slen); len -= slen; } } /* add icrc, lt byte, and padding to flit */ if (extra_bytes) seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma, extra_bytes); seg_pio_copy_end(pbuf); } update_tx_opstats(qp, ps, plen); trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5)); pio_bail: spin_lock_irqsave(&qp->s_lock, flags); if (qp->s_wqe) { rvt_send_complete(qp, qp->s_wqe, wc_status); } else if (qp->ibqp.qp_type == IB_QPT_RC) { if (unlikely(wc_status == IB_WC_GENERAL_ERR)) hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr); hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr); } spin_unlock_irqrestore(&qp->s_lock, flags); ret = 0; bail: hfi1_put_txreq(ps->s_txreq); return ret; } /* * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent * being an entry from the partition key table), return 0 * otherwise. Use the matching criteria for egress partition keys * specified in the OPAv1 spec., section 9.1l.7. */ static inline int egress_pkey_matches_entry(u16 pkey, u16 ent) { u16 mkey = pkey & PKEY_LOW_15_MASK; u16 mentry = ent & PKEY_LOW_15_MASK; if (mkey == mentry) { /* * If pkey[15] is set (full partition member), * is bit 15 in the corresponding table element * clear (limited member)? */ if (pkey & PKEY_MEMBER_MASK) return !!(ent & PKEY_MEMBER_MASK); return 1; } return 0; } /** * egress_pkey_check - check P_KEY of a packet * @ppd: Physical IB port data * @slid: SLID for packet * @pkey: PKEY for header * @sc5: SC for packet * @s_pkey_index: It will be used for look up optimization for kernel contexts * only. If it is negative value, then it means user contexts is calling this * function. * * It checks if hdr's pkey is valid. * * Return: 0 on success, otherwise, 1 */ int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey, u8 sc5, int8_t s_pkey_index) { struct hfi1_devdata *dd; int i; int is_user_ctxt_mechanism = (s_pkey_index < 0); if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT)) return 0; /* If SC15, pkey[0:14] must be 0x7fff */ if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK)) goto bad; /* Is the pkey = 0x0, or 0x8000? */ if ((pkey & PKEY_LOW_15_MASK) == 0) goto bad; /* * For the kernel contexts only, if a qp is passed into the function, * the most likely matching pkey has index qp->s_pkey_index */ if (!is_user_ctxt_mechanism && egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) { return 0; } for (i = 0; i < MAX_PKEY_VALUES; i++) { if (egress_pkey_matches_entry(pkey, ppd->pkeys[i])) return 0; } bad: /* * For the user-context mechanism, the P_KEY check would only happen * once per SDMA request, not once per packet. Therefore, there's no * need to increment the counter for the user-context mechanism. 
*/ if (!is_user_ctxt_mechanism) { incr_cntr64(&ppd->port_xmit_constraint_errors); dd = ppd->dd; if (!(dd->err_info_xmit_constraint.status & OPA_EI_STATUS_SMASK)) { dd->err_info_xmit_constraint.status |= OPA_EI_STATUS_SMASK; dd->err_info_xmit_constraint.slid = slid; dd->err_info_xmit_constraint.pkey = pkey; } } return 1; } /* * get_send_routine - choose an egress routine * * Choose an egress routine based on QP type * and size */ static inline send_routine get_send_routine(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_qp_priv *priv = qp->priv; struct verbs_txreq *tx = ps->s_txreq; if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA))) return dd->process_pio_send; switch (qp->ibqp.qp_type) { case IB_QPT_SMI: return dd->process_pio_send; case IB_QPT_GSI: case IB_QPT_UD: break; case IB_QPT_UC: case IB_QPT_RC: priv->s_running_pkt_size = (tx->s_cur_size + priv->s_running_pkt_size) / 2; if (piothreshold && priv->s_running_pkt_size <= min(piothreshold, qp->pmtu) && (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) && iowait_sdma_pending(&priv->s_iowait) == 0 && !sdma_txreq_built(&tx->txreq)) return dd->process_pio_send; break; default: break; } return dd->process_dma_send; } /** * hfi1_verbs_send - send a packet * @qp: the QP to send on * @ps: the state of the packet to send * * Return zero if packet is sent or queued OK. * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise. */ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_qp_priv *priv = qp->priv; struct ib_other_headers *ohdr = NULL; send_routine sr; int ret; u16 pkey; u32 slid; u8 l4 = 0; /* locate the pkey within the headers */ if (ps->s_txreq->phdr.hdr.hdr_type) { struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah; l4 = hfi1_16B_get_l4(hdr); if (l4 == OPA_16B_L4_IB_LOCAL) ohdr = &hdr->u.oth; else if (l4 == OPA_16B_L4_IB_GLOBAL) ohdr = &hdr->u.l.oth; slid = hfi1_16B_get_slid(hdr); pkey = hfi1_16B_get_pkey(hdr); } else { struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh; u8 lnh = ib_get_lnh(hdr); if (lnh == HFI1_LRH_GRH) ohdr = &hdr->u.l.oth; else ohdr = &hdr->u.oth; slid = ib_get_slid(hdr); pkey = ib_bth_get_pkey(ohdr); } if (likely(l4 != OPA_16B_L4_FM)) ps->opcode = ib_bth_get_opcode(ohdr); else ps->opcode = IB_OPCODE_UD_SEND_ONLY; sr = get_send_routine(qp, ps); ret = egress_pkey_check(dd->pport, slid, pkey, priv->s_sc, qp->s_pkey_index); if (unlikely(ret)) { /* * The value we are returning here does not get propagated to * the verbs caller. Thus we need to complete the request with * error otherwise the caller could be sitting waiting on the * completion event. Only do this for PIO. SDMA has its own * mechanism for handling the errors. So for SDMA we can just * return. */ if (sr == dd->process_pio_send) { unsigned long flags; hfi1_cdbg(PIO, "%s() Failed. Completing with err", __func__); spin_lock_irqsave(&qp->s_lock, flags); rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); spin_unlock_irqrestore(&qp->s_lock, flags); } return -EINVAL; } if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait)) return pio_wait(qp, ps->s_txreq->psc, ps, HFI1_S_WAIT_PIO_DRAIN); return sr(qp, ps, 0); } /** * hfi1_fill_device_attr - Fill in rvt dev info device attributes. 
* @dd: the device data structure */ static void hfi1_fill_device_attr(struct hfi1_devdata *dd) { struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; u32 ver = dd->dc8051_ver; memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props)); rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) | ((u64)(dc8051_ver_min(ver)) << 16) | (u64)dc8051_ver_patch(ver); rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE | IB_DEVICE_MEM_MGT_EXTENSIONS; rdi->dparms.props.kernel_cap_flags = IBK_RDMA_NETDEV_OPA; rdi->dparms.props.page_size_cap = PAGE_SIZE; rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3; rdi->dparms.props.vendor_part_id = dd->pcidev->device; rdi->dparms.props.hw_ver = dd->minrev; rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid; rdi->dparms.props.max_mr_size = U64_MAX; rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX; rdi->dparms.props.max_qp = hfi1_max_qps; rdi->dparms.props.max_qp_wr = (hfi1_max_qp_wrs >= HFI1_QP_WQE_INVALID ? HFI1_QP_WQE_INVALID - 1 : hfi1_max_qp_wrs); rdi->dparms.props.max_send_sge = hfi1_max_sges; rdi->dparms.props.max_recv_sge = hfi1_max_sges; rdi->dparms.props.max_sge_rd = hfi1_max_sges; rdi->dparms.props.max_cq = hfi1_max_cqs; rdi->dparms.props.max_ah = hfi1_max_ahs; rdi->dparms.props.max_cqe = hfi1_max_cqes; rdi->dparms.props.max_pd = hfi1_max_pds; rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; rdi->dparms.props.max_qp_init_rd_atom = 255; rdi->dparms.props.max_srq = hfi1_max_srqs; rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs; rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges; rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB; rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd); rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps; rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached; rdi->dparms.props.max_total_mcast_qp_attach = rdi->dparms.props.max_mcast_qp_attach * rdi->dparms.props.max_mcast_grp; } static inline u16 opa_speed_to_ib(u16 in) { u16 out = 0; if (in & OPA_LINK_SPEED_25G) out |= IB_SPEED_EDR; if (in & OPA_LINK_SPEED_12_5G) out |= IB_SPEED_FDR; return out; } /* * Convert a single OPA link width (no multiple flags) to an IB value. * A zero OPA link width means link down, which means the IB width value * is a don't care. */ static inline u16 opa_width_to_ib(u16 in) { switch (in) { case OPA_LINK_WIDTH_1X: /* map 2x and 3x to 1x as they don't exist in IB */ case OPA_LINK_WIDTH_2X: case OPA_LINK_WIDTH_3X: return IB_WIDTH_1X; default: /* link down or unknown, return our largest width */ case OPA_LINK_WIDTH_4X: return IB_WIDTH_4X; } } static int query_port(struct rvt_dev_info *rdi, u32 port_num, struct ib_port_attr *props) { struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); struct hfi1_devdata *dd = dd_from_dev(verbs_dev); struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; u32 lid = ppd->lid; /* props being zeroed by the caller, avoid zeroing it here */ props->lid = lid ? 
lid : 0; props->lmc = ppd->lmc; /* OPA logical states match IB logical states */ props->state = driver_lstate(ppd); props->phys_state = driver_pstate(ppd); props->gid_tbl_len = HFI1_GUIDS_PER_PORT; props->active_width = (u8)opa_width_to_ib(ppd->link_width_active); /* see rate_show() in ib core/sysfs.c */ props->active_speed = opa_speed_to_ib(ppd->link_speed_active); props->max_vl_num = ppd->vls_supported; /* Once we are a "first class" citizen and have added the OPA MTUs to * the core we can advertise the larger MTU enum to the ULPs, for now * advertise only 4K. * * Those applications which are either OPA aware or pass the MTU enum * from the Path Records to us will get the new 8k MTU. Those that * attempt to process the MTU enum may fail in various ways. */ props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ? 4096 : hfi1_max_mtu), IB_MTU_4096); props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu : mtu_to_enum(ppd->ibmtu, IB_MTU_4096); props->phys_mtu = hfi1_max_mtu; return 0; } static int modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) { struct hfi1_devdata *dd = dd_from_ibdev(device); unsigned i; int ret; if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | IB_DEVICE_MODIFY_NODE_DESC)) { ret = -EOPNOTSUPP; goto bail; } if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) { memcpy(device->node_desc, device_modify->node_desc, IB_DEVICE_NODE_DESC_MAX); for (i = 0; i < dd->num_pports; i++) { struct hfi1_ibport *ibp = &dd->pport[i].ibport_data; hfi1_node_desc_chg(ibp); } } if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) { ib_hfi1_sys_image_guid = cpu_to_be64(device_modify->sys_image_guid); for (i = 0; i < dd->num_pports; i++) { struct hfi1_ibport *ibp = &dd->pport[i].ibport_data; hfi1_sys_guid_chg(ibp); } } ret = 0; bail: return ret; } static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num) { struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); struct hfi1_devdata *dd = dd_from_dev(verbs_dev); struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0, OPA_LINKDOWN_REASON_UNKNOWN); return set_link_state(ppd, HLS_DN_DOWNDEF); } static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp, int guid_index, __be64 *guid) { struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp); if (guid_index >= HFI1_GUIDS_PER_PORT) return -EINVAL; *guid = get_sguid(ibp, guid_index); return 0; } /* * convert ah port,sl to sc */ u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah) { struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah)); return ibp->sl_to_sc[rdma_ah_get_sl(ah)]; } static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) { struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; struct hfi1_devdata *dd; u8 sc5; u8 sl; if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) return -EINVAL; /* test the mapping for validity */ ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); ppd = ppd_from_ibp(ibp); dd = dd_from_ppd(ppd); sl = rdma_ah_get_sl(ah_attr); if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) return -EINVAL; sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc)); sc5 = ibp->sl_to_sc[sl]; if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) return -EINVAL; return 0; } static void hfi1_notify_new_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr, struct rvt_ah *ah) { struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; struct 
hfi1_devdata *dd; u8 sc5; struct rdma_ah_attr *attr = &ah->attr; /* * Do not trust reading anything from rvt_ah at this point as it is not * done being setup. We can however modify things which we need to set. */ ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); ppd = ppd_from_ibp(ibp); sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)]; hfi1_update_ah_attr(ibdev, attr); hfi1_make_opa_lid(attr); dd = dd_from_ppd(ppd); ah->vl = sc_to_vlt(dd, sc5); if (ah->vl < num_vls || ah->vl == 15) ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu); } /** * hfi1_get_npkeys - return the size of the PKEY table for context 0 * @dd: the hfi1_ib device */ unsigned hfi1_get_npkeys(struct hfi1_devdata *dd) { return ARRAY_SIZE(dd->pport[0].pkeys); } static void init_ibport(struct hfi1_pportdata *ppd) { struct hfi1_ibport *ibp = &ppd->ibport_data; size_t sz = ARRAY_SIZE(ibp->sl_to_sc); int i; for (i = 0; i < sz; i++) { ibp->sl_to_sc[i] = i; ibp->sc_to_sl[i] = i; } for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++) INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list); timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0); spin_lock_init(&ibp->rvp.lock); /* Set the prefix to the default value (see ch. 4.1.1) */ ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX; ibp->rvp.sm_lid = 0; /* * Below should only set bits defined in OPA PortInfo.CapabilityMask * and PortInfo.CapabilityMask3 */ ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP | IB_PORT_CAP_MASK_NOTICE_SUP; ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported; ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); } static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str) { struct rvt_dev_info *rdi = ib_to_rvt(ibdev); struct hfi1_ibdev *dev = dev_from_rdi(rdi); u32 ver = dd_from_dev(dev)->dc8051_ver; snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u", dc8051_ver_maj(ver), dc8051_ver_min(ver), dc8051_ver_patch(ver)); } static const char * const driver_cntr_names[] = { /* must be element 0*/ "DRIVER_KernIntr", "DRIVER_ErrorIntr", "DRIVER_Tx_Errs", "DRIVER_Rcv_Errs", "DRIVER_HW_Errs", "DRIVER_NoPIOBufs", "DRIVER_CtxtsOpen", "DRIVER_RcvLen_Errs", "DRIVER_EgrBufFull", "DRIVER_EgrHdrFull" }; static struct rdma_stat_desc *dev_cntr_descs; static struct rdma_stat_desc *port_cntr_descs; int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); static int num_dev_cntrs; static int num_port_cntrs; /* * Convert a list of names separated by '\n' into an array of NULL terminated * strings. Optionally some entries can be reserved in the array to hold extra * external strings. 
*/ static int init_cntr_names(const char *names_in, const size_t names_len, int num_extra_names, int *num_cntrs, struct rdma_stat_desc **cntr_descs) { struct rdma_stat_desc *names_out; char *p; int i, n; n = 0; for (i = 0; i < names_len; i++) if (names_in[i] == '\n') n++; names_out = kzalloc((n + num_extra_names) * sizeof(*names_out) + names_len, GFP_KERNEL); if (!names_out) { *num_cntrs = 0; *cntr_descs = NULL; return -ENOMEM; } p = (char *)&names_out[n + num_extra_names]; memcpy(p, names_in, names_len); for (i = 0; i < n; i++) { names_out[i].name = p; p = strchr(p, '\n'); *p++ = '\0'; } *num_cntrs = n; *cntr_descs = names_out; return 0; } static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev) { if (!dev_cntr_descs) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); int i, err; err = init_cntr_names(dd->cntrnames, dd->cntrnameslen, num_driver_cntrs, &num_dev_cntrs, &dev_cntr_descs); if (err) return NULL; for (i = 0; i < num_driver_cntrs; i++) dev_cntr_descs[num_dev_cntrs + i].name = driver_cntr_names[i]; } return rdma_alloc_hw_stats_struct(dev_cntr_descs, num_dev_cntrs + num_driver_cntrs, RDMA_HW_STATS_DEFAULT_LIFESPAN); } static struct rdma_hw_stats *hfi_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { if (!port_cntr_descs) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); int err; err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen, 0, &num_port_cntrs, &port_cntr_descs); if (err) return NULL; } return rdma_alloc_hw_stats_struct(port_cntr_descs, num_port_cntrs, RDMA_HW_STATS_DEFAULT_LIFESPAN); } static u64 hfi1_sps_ints(void) { unsigned long index, flags; struct hfi1_devdata *dd; u64 sps_ints = 0; xa_lock_irqsave(&hfi1_dev_table, flags); xa_for_each(&hfi1_dev_table, index, dd) { sps_ints += get_all_cpu_total(dd->int_counter); } xa_unlock_irqrestore(&hfi1_dev_table, flags); return sps_ints; } static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port, int index) { u64 *values; int count; if (!port) { u64 *stats = (u64 *)&hfi1_stats; int i; hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values); values[num_dev_cntrs] = hfi1_sps_ints(); for (i = 1; i < num_driver_cntrs; i++) values[num_dev_cntrs + i] = stats[i]; count = num_dev_cntrs + num_driver_cntrs; } else { struct hfi1_ibport *ibp = to_iport(ibdev, port); hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values); count = num_port_cntrs; } memcpy(stats->value, values, count * sizeof(u64)); return count; } static const struct ib_device_ops hfi1_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_HFI1, .alloc_hw_device_stats = hfi1_alloc_hw_device_stats, .alloc_hw_port_stats = hfi_alloc_hw_port_stats, .alloc_rdma_netdev = hfi1_vnic_alloc_rn, .device_group = &ib_hfi1_attr_group, .get_dev_fw_str = hfi1_get_dev_fw_str, .get_hw_stats = get_hw_stats, .modify_device = modify_device, .port_groups = hfi1_attr_port_groups, /* keep process mad in the driver */ .process_mad = hfi1_process_mad, .rdma_netdev_get_params = hfi1_ipoib_rn_get_params, }; /** * hfi1_register_ib_device - register our device with the infiniband core * @dd: the device data structure * Return 0 if successful, errno if unsuccessful. */ int hfi1_register_ib_device(struct hfi1_devdata *dd) { struct hfi1_ibdev *dev = &dd->verbs_dev; struct ib_device *ibdev = &dev->rdi.ibdev; struct hfi1_pportdata *ppd = dd->pport; struct hfi1_ibport *ibp = &ppd->ibport_data; unsigned i; int ret; for (i = 0; i < dd->num_pports; i++) init_ibport(ppd + i); /* Only need to initialize non-zero fields. 
*/ timer_setup(&dev->mem_timer, mem_timer, 0); seqlock_init(&dev->iowait_lock); seqlock_init(&dev->txwait_lock); INIT_LIST_HEAD(&dev->txwait); INIT_LIST_HEAD(&dev->memwait); ret = verbs_txreq_init(dev); if (ret) goto err_verbs_txreq; /* Use first-port GUID as node guid */ ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX); /* * The system image GUID is supposed to be the same for all * HFIs in a single system but since there can be other * device types in the system, we can't be sure this is unique. */ if (!ib_hfi1_sys_image_guid) ib_hfi1_sys_image_guid = ibdev->node_guid; ibdev->phys_port_cnt = dd->num_pports; ibdev->dev.parent = &dd->pcidev->dev; ib_set_device_ops(ibdev, &hfi1_dev_ops); strscpy(ibdev->node_desc, init_utsname()->nodename, sizeof(ibdev->node_desc)); /* * Fill in rvt info object. */ dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev; dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah; dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be; dd->verbs_dev.rdi.driver_f.query_port_state = query_port; dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port; dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg; /* * Fill in rvt info device attributes. */ hfi1_fill_device_attr(dd); /* queue pair */ dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size; dd->verbs_dev.rdi.dparms.qpn_start = 0; dd->verbs_dev.rdi.dparms.qpn_inc = 1; dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift; dd->verbs_dev.rdi.dparms.qpn_res_start = RVT_KDETH_QP_BASE; dd->verbs_dev.rdi.dparms.qpn_res_end = RVT_AIP_QP_MAX; dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC; dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK; dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT; dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK; dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA | RDMA_CORE_CAP_OPA_AH; dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE; dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc; dd->verbs_dev.rdi.driver_f.qp_priv_init = hfi1_qp_priv_init; dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free; dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps; dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset; dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt; dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send; dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send; dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr; dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp; dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters; dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue; dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp; dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp; dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp; dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu; dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp; dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp; dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc; dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe; dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup = hfi1_comp_vect_mappings_lookup; /* completeion queue */ dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus; dd->verbs_dev.rdi.dparms.node = dd->node; /* misc settings */ dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */ dd->verbs_dev.rdi.dparms.lkey_table_size = 
hfi1_lkey_table_size; dd->verbs_dev.rdi.dparms.nports = dd->num_pports; dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd); dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode; dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold; dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period; dd->verbs_dev.rdi.dparms.reserved_operations = 1; dd->verbs_dev.rdi.dparms.extra_rdma_atomic = HFI1_TID_RDMA_WRITE_CNT; /* post send table */ dd->verbs_dev.rdi.post_parms = hfi1_post_parms; /* opcode translation table */ dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode; ppd = dd->pport; for (i = 0; i < dd->num_pports; i++, ppd++) rvt_init_port(&dd->verbs_dev.rdi, &ppd->ibport_data.rvp, i, ppd->pkeys); ret = rvt_register_device(&dd->verbs_dev.rdi); if (ret) goto err_verbs_txreq; ret = hfi1_verbs_register_sysfs(dd); if (ret) goto err_class; return ret; err_class: rvt_unregister_device(&dd->verbs_dev.rdi); err_verbs_txreq: verbs_txreq_exit(dev); dd_dev_err(dd, "cannot register verbs: %d!\n", -ret); return ret; } void hfi1_unregister_ib_device(struct hfi1_devdata *dd) { struct hfi1_ibdev *dev = &dd->verbs_dev; hfi1_verbs_unregister_sysfs(dd); rvt_unregister_device(&dd->verbs_dev.rdi); if (!list_empty(&dev->txwait)) dd_dev_err(dd, "txwait list not empty!\n"); if (!list_empty(&dev->memwait)) dd_dev_err(dd, "memwait list not empty!\n"); del_timer_sync(&dev->mem_timer); verbs_txreq_exit(dev); kfree(dev_cntr_descs); kfree(port_cntr_descs); dev_cntr_descs = NULL; port_cntr_descs = NULL; } void hfi1_cnp_rcv(struct hfi1_packet *packet) { struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct ib_header *hdr = packet->hdr; struct rvt_qp *qp = packet->qp; u32 lqpn, rqpn = 0; u16 rlid = 0; u8 sl, sc5, svc_type; switch (packet->qp->ibqp.qp_type) { case IB_QPT_UC: rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); rqpn = qp->remote_qpn; svc_type = IB_CC_SVCTYPE_UC; break; case IB_QPT_RC: rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); rqpn = qp->remote_qpn; svc_type = IB_CC_SVCTYPE_RC; break; case IB_QPT_SMI: case IB_QPT_GSI: case IB_QPT_UD: svc_type = IB_CC_SVCTYPE_UD; break; default: ibp->rvp.n_pkt_drops++; return; } sc5 = hfi1_9B_get_sc5(hdr, packet->rhf); sl = ibp->sc_to_sl[sc5]; lqpn = qp->ibqp.qp_num; process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type); }
linux-master
drivers/infiniband/hw/hfi1/verbs.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015-2020 Intel Corporation. * Copyright(c) 2021 Cornelis Networks. */ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/prefetch.h> #include <rdma/ib_verbs.h> #include <linux/etherdevice.h> #include "hfi.h" #include "trace.h" #include "qp.h" #include "sdma.h" #include "debugfs.h" #include "vnic.h" #include "fault.h" #include "ipoib.h" #include "netdev.h" #undef pr_fmt #define pr_fmt(fmt) DRIVER_NAME ": " fmt DEFINE_MUTEX(hfi1_mutex); /* general driver use */ unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU; module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO); MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify( HFI1_DEFAULT_MAX_MTU)); unsigned int hfi1_cu = 1; module_param_named(cu, hfi1_cu, uint, S_IRUGO); MODULE_PARM_DESC(cu, "Credit return units"); unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT; static int hfi1_caps_set(const char *val, const struct kernel_param *kp); static int hfi1_caps_get(char *buffer, const struct kernel_param *kp); static const struct kernel_param_ops cap_ops = { .set = hfi1_caps_set, .get = hfi1_caps_get }; module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("Cornelis Omni-Path Express driver"); /* * MAX_PKT_RCV is the max # if packets processed per receive interrupt. */ #define MAX_PKT_RECV 64 /* * MAX_PKT_THREAD_RCV is the max # of packets processed before * the qp_wait_list queue is flushed. */ #define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4) #define EGR_HEAD_UPDATE_THRESHOLD 16 struct hfi1_ib_stats hfi1_stats; static int hfi1_caps_set(const char *val, const struct kernel_param *kp) { int ret = 0; unsigned long *cap_mask_ptr = (unsigned long *)kp->arg, cap_mask = *cap_mask_ptr, value, diff, write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) | HFI1_CAP_WRITABLE_MASK); ret = kstrtoul(val, 0, &value); if (ret) { pr_warn("Invalid module parameter value for 'cap_mask'\n"); goto done; } /* Get the changed bits (except the locked bit) */ diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK); /* Remove any bits that are not allowed to change after driver load */ if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) { pr_warn("Ignoring non-writable capability bits %#lx\n", diff & ~write_mask); diff &= write_mask; } /* Mask off any reserved bits */ diff &= ~HFI1_CAP_RESERVED_MASK; /* Clear any previously set and changing bits */ cap_mask &= ~diff; /* Update the bits with the new capability */ cap_mask |= (value & diff); /* Check for any kernel/user restrictions */ diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^ ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT); cap_mask &= ~diff; /* Set the bitmask to the final set */ *cap_mask_ptr = cap_mask; done: return ret; } static int hfi1_caps_get(char *buffer, const struct kernel_param *kp) { unsigned long cap_mask = *(unsigned long *)kp->arg; cap_mask &= ~HFI1_CAP_LOCKED_SMASK; cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT); return sysfs_emit(buffer, "0x%lx\n", cap_mask); } struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi) { struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi); struct hfi1_devdata *dd = container_of(ibdev, struct hfi1_devdata, verbs_dev); return dd->pcidev; } 
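/*
 * Below: unit/context helpers, receive header-error handling, ECN
 * (FECN/BECN) slow-path processing, the per-context receive interrupt
 * handlers (no-DMA-rtail, DMA-rtail, and the NAPI fast/slow variants),
 * the armed-to-active link-state transition work, and the MTU and
 * LED-override helpers.
 */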
/* * Return count of units with at least one port ACTIVE. */ int hfi1_count_active_units(void) { struct hfi1_devdata *dd; struct hfi1_pportdata *ppd; unsigned long index, flags; int pidx, nunits_active = 0; xa_lock_irqsave(&hfi1_dev_table, flags); xa_for_each(&hfi1_dev_table, index, dd) { if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1) continue; for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (ppd->lid && ppd->linkup) { nunits_active++; break; } } } xa_unlock_irqrestore(&hfi1_dev_table, flags); return nunits_active; } /* * Get address of eager buffer from it's index (allocated in chunks, not * contiguous). */ static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf, u8 *update) { u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf); *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset; return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) + (offset * RCV_BUF_BLOCK_SIZE)); } static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd, __le32 *rhf_addr) { u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr)); return (void *)(rhf_addr - rcd->rhf_offset + offset); } static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd, __le32 *rhf_addr) { return (struct ib_header *)hfi1_get_header(rcd, rhf_addr); } static inline struct hfi1_16b_header *hfi1_get_16B_header(struct hfi1_ctxtdata *rcd, __le32 *rhf_addr) { return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr); } /* * Validate and encode the a given RcvArray Buffer size. * The function will check whether the given size falls within * allowed size ranges for the respective type and, optionally, * return the proper encoding. */ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded) { if (unlikely(!PAGE_ALIGNED(size))) return 0; if (unlikely(size < MIN_EAGER_BUFFER)) return 0; if (size > (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER)) return 0; if (encoded) *encoded = ilog2(size / PAGE_SIZE) + 1; return 1; } static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, struct hfi1_packet *packet) { struct ib_header *rhdr = packet->hdr; u32 rte = rhf_rcv_type_err(packet->rhf); u32 mlid_base; struct hfi1_ibport *ibp = rcd_to_iport(rcd); struct hfi1_devdata *dd = ppd->dd; struct hfi1_ibdev *verbs_dev = &dd->verbs_dev; struct rvt_dev_info *rdi = &verbs_dev->rdi; if ((packet->rhf & RHF_DC_ERR) && hfi1_dbg_fault_suppress_err(verbs_dev)) return; if (packet->rhf & RHF_ICRC_ERR) return; if (packet->etype == RHF_RCV_TYPE_BYPASS) { goto drop; } else { u8 lnh = ib_get_lnh(rhdr); mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE); if (lnh == HFI1_LRH_BTH) { packet->ohdr = &rhdr->u.oth; } else if (lnh == HFI1_LRH_GRH) { packet->ohdr = &rhdr->u.l.oth; packet->grh = &rhdr->u.l.grh; } else { goto drop; } } if (packet->rhf & RHF_TID_ERR) { /* For TIDERR and RC QPs preemptively schedule a NAK */ u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */ u32 dlid = ib_get_dlid(rhdr); u32 qp_num; /* Sanity check packet */ if (tlen < 24) goto drop; /* Check for GRH */ if (packet->grh) { u32 vtf; struct ib_grh *grh = packet->grh; if (grh->next_hdr != IB_GRH_NEXT_HDR) goto drop; vtf = be32_to_cpu(grh->version_tclass_flow); if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) goto drop; } /* Get the destination QP number. 
*/ qp_num = ib_bth_get_qpn(packet->ohdr); if (dlid < mlid_base) { struct rvt_qp *qp; unsigned long flags; rcu_read_lock(); qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); if (!qp) { rcu_read_unlock(); goto drop; } /* * Handle only RC QPs - for other QP types drop error * packet. */ spin_lock_irqsave(&qp->r_lock, flags); /* Check for valid receive state. */ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { ibp->rvp.n_pkt_drops++; } switch (qp->ibqp.qp_type) { case IB_QPT_RC: hfi1_rc_hdrerr(rcd, packet, qp); break; default: /* For now don't handle any other QP types */ break; } spin_unlock_irqrestore(&qp->r_lock, flags); rcu_read_unlock(); } /* Unicast QP */ } /* Valid packet with TIDErr */ /* handle "RcvTypeErr" flags */ switch (rte) { case RHF_RTE_ERROR_OP_CODE_ERR: { void *ebuf = NULL; u8 opcode; if (rhf_use_egr_bfr(packet->rhf)) ebuf = packet->ebuf; if (!ebuf) goto drop; /* this should never happen */ opcode = ib_bth_get_opcode(packet->ohdr); if (opcode == IB_OPCODE_CNP) { /* * Only in pre-B0 h/w is the CNP_OPCODE handled * via this code path. */ struct rvt_qp *qp = NULL; u32 lqpn, rqpn; u16 rlid; u8 svc_type, sl, sc5; sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf); sl = ibp->sc_to_sl[sc5]; lqpn = ib_bth_get_qpn(packet->ohdr); rcu_read_lock(); qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn); if (!qp) { rcu_read_unlock(); goto drop; } switch (qp->ibqp.qp_type) { case IB_QPT_UD: rlid = 0; rqpn = 0; svc_type = IB_CC_SVCTYPE_UD; break; case IB_QPT_UC: rlid = ib_get_slid(rhdr); rqpn = qp->remote_qpn; svc_type = IB_CC_SVCTYPE_UC; break; default: rcu_read_unlock(); goto drop; } process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type); rcu_read_unlock(); } packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK; break; } default: break; } drop: return; } static inline void init_packet(struct hfi1_ctxtdata *rcd, struct hfi1_packet *packet) { packet->rsize = get_hdrqentsize(rcd); /* words */ packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */ packet->rcd = rcd; packet->updegr = 0; packet->etail = -1; packet->rhf_addr = get_rhf_addr(rcd); packet->rhf = rhf_to_cpu(packet->rhf_addr); packet->rhqoff = hfi1_rcd_head(rcd); packet->numpkt = 0; } /* We support only two types - 9B and 16B for now */ static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = { [HFI1_PKT_TYPE_9B] = &return_cnp, [HFI1_PKT_TYPE_16B] = &return_cnp_16B }; /** * hfi1_process_ecn_slowpath - Process FECN or BECN bits * @qp: The packet's destination QP * @pkt: The packet itself. * @prescan: Is the caller the RXQ prescan * * Process the packet's FECN or BECN bits. By now, the packet * has already been evaluated whether processing of those bit should * be done. * The significance of the @prescan argument is that if the caller * is the RXQ prescan, a CNP will be send out instead of waiting for the * normal packet processing to send an ACK with BECN set (or a CNP). 
*/ bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, bool prescan) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct ib_other_headers *ohdr = pkt->ohdr; struct ib_grh *grh = pkt->grh; u32 rqpn = 0; u16 pkey; u32 rlid, slid, dlid = 0; u8 hdr_type, sc, svc_type, opcode; bool is_mcast = false, ignore_fecn = false, do_cnp = false, fecn, becn; /* can be called from prescan */ if (pkt->etype == RHF_RCV_TYPE_BYPASS) { pkey = hfi1_16B_get_pkey(pkt->hdr); sc = hfi1_16B_get_sc(pkt->hdr); dlid = hfi1_16B_get_dlid(pkt->hdr); slid = hfi1_16B_get_slid(pkt->hdr); is_mcast = hfi1_is_16B_mcast(dlid); opcode = ib_bth_get_opcode(ohdr); hdr_type = HFI1_PKT_TYPE_16B; fecn = hfi1_16B_get_fecn(pkt->hdr); becn = hfi1_16B_get_becn(pkt->hdr); } else { pkey = ib_bth_get_pkey(ohdr); sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf); dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) : ppd->lid; slid = ib_get_slid(pkt->hdr); is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) && (dlid != be16_to_cpu(IB_LID_PERMISSIVE)); opcode = ib_bth_get_opcode(ohdr); hdr_type = HFI1_PKT_TYPE_9B; fecn = ib_bth_get_fecn(ohdr); becn = ib_bth_get_becn(ohdr); } switch (qp->ibqp.qp_type) { case IB_QPT_UD: rlid = slid; rqpn = ib_get_sqpn(pkt->ohdr); svc_type = IB_CC_SVCTYPE_UD; break; case IB_QPT_SMI: case IB_QPT_GSI: rlid = slid; rqpn = ib_get_sqpn(pkt->ohdr); svc_type = IB_CC_SVCTYPE_UD; break; case IB_QPT_UC: rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); rqpn = qp->remote_qpn; svc_type = IB_CC_SVCTYPE_UC; break; case IB_QPT_RC: rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); rqpn = qp->remote_qpn; svc_type = IB_CC_SVCTYPE_RC; break; default: return false; } ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) || (opcode == IB_OPCODE_RC_ACKNOWLEDGE); /* * ACKNOWLEDGE packets do not get a CNP but this will be * guarded by ignore_fecn above. */ do_cnp = prescan || (opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST && opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) || opcode == TID_OP(READ_RESP) || opcode == TID_OP(ACK); /* Call appropriate CNP handler */ if (!ignore_fecn && do_cnp && fecn) hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey, dlid, rlid, sc, grh); if (becn) { u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; u8 sl = ibp->sc_to_sl[sc]; process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type); } return !ignore_fecn && fecn; } struct ps_mdata { struct hfi1_ctxtdata *rcd; u32 rsize; u32 maxcnt; u32 ps_head; u32 ps_tail; u32 ps_seq; }; static inline void init_ps_mdata(struct ps_mdata *mdata, struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; mdata->rcd = rcd; mdata->rsize = packet->rsize; mdata->maxcnt = packet->maxcnt; mdata->ps_head = packet->rhqoff; if (get_dma_rtail_setting(rcd)) { mdata->ps_tail = get_rcvhdrtail(rcd); if (rcd->ctxt == HFI1_CTRL_CTXT) mdata->ps_seq = hfi1_seq_cnt(rcd); else mdata->ps_seq = 0; /* not used with DMA_RTAIL */ } else { mdata->ps_tail = 0; /* used only with DMA_RTAIL*/ mdata->ps_seq = hfi1_seq_cnt(rcd); } } static inline int ps_done(struct ps_mdata *mdata, u64 rhf, struct hfi1_ctxtdata *rcd) { if (get_dma_rtail_setting(rcd)) return mdata->ps_head == mdata->ps_tail; return mdata->ps_seq != rhf_rcv_seq(rhf); } static inline int ps_skip(struct ps_mdata *mdata, u64 rhf, struct hfi1_ctxtdata *rcd) { /* * Control context can potentially receive an invalid rhf. * Drop such packets. 
*/ if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail)) return mdata->ps_seq != rhf_rcv_seq(rhf); return 0; } static inline void update_ps_mdata(struct ps_mdata *mdata, struct hfi1_ctxtdata *rcd) { mdata->ps_head += mdata->rsize; if (mdata->ps_head >= mdata->maxcnt) mdata->ps_head = 0; /* Control context must do seq counting */ if (!get_dma_rtail_setting(rcd) || rcd->ctxt == HFI1_CTRL_CTXT) mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq); } /* * prescan_rxq - search through the receive queue looking for packets * containing Excplicit Congestion Notifications (FECNs, or BECNs). * When an ECN is found, process the Congestion Notification, and toggle * it off. * This is declared as a macro to allow quick checking of the port to avoid * the overhead of a function call if not enabled. */ #define prescan_rxq(rcd, packet) \ do { \ if (rcd->ppd->cc_prescan) \ __prescan_rxq(packet); \ } while (0) static void __prescan_rxq(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; struct ps_mdata mdata; init_ps_mdata(&mdata, packet); while (1) { struct hfi1_ibport *ibp = rcd_to_iport(rcd); __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head + packet->rcd->rhf_offset; struct rvt_qp *qp; struct ib_header *hdr; struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi; u64 rhf = rhf_to_cpu(rhf_addr); u32 etype = rhf_rcv_type(rhf), qpn, bth1; u8 lnh; if (ps_done(&mdata, rhf, rcd)) break; if (ps_skip(&mdata, rhf, rcd)) goto next; if (etype != RHF_RCV_TYPE_IB) goto next; packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr); hdr = packet->hdr; lnh = ib_get_lnh(hdr); if (lnh == HFI1_LRH_BTH) { packet->ohdr = &hdr->u.oth; packet->grh = NULL; } else if (lnh == HFI1_LRH_GRH) { packet->ohdr = &hdr->u.l.oth; packet->grh = &hdr->u.l.grh; } else { goto next; /* just in case */ } if (!hfi1_may_ecn(packet)) goto next; bth1 = be32_to_cpu(packet->ohdr->bth[1]); qpn = bth1 & RVT_QPN_MASK; rcu_read_lock(); qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn); if (!qp) { rcu_read_unlock(); goto next; } hfi1_process_ecn_slowpath(qp, packet, true); rcu_read_unlock(); /* turn off BECN, FECN */ bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK); packet->ohdr->bth[1] = cpu_to_be32(bth1); next: update_ps_mdata(&mdata, rcd); } } static void process_rcv_qp_work(struct hfi1_packet *packet) { struct rvt_qp *qp, *nqp; struct hfi1_ctxtdata *rcd = packet->rcd; /* * Iterate over all QPs waiting to respond. * The list won't change since the IRQ is only run on one CPU. 
*/ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { list_del_init(&qp->rspwait); if (qp->r_flags & RVT_R_RSP_NAK) { qp->r_flags &= ~RVT_R_RSP_NAK; packet->qp = qp; hfi1_send_rc_ack(packet, 0); } if (qp->r_flags & RVT_R_RSP_SEND) { unsigned long flags; qp->r_flags &= ~RVT_R_RSP_SEND; spin_lock_irqsave(&qp->s_lock, flags); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND) hfi1_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); } rvt_put_qp(qp); } } static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread) { if (thread) { if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0) /* allow defered processing */ process_rcv_qp_work(packet); cond_resched(); return RCV_PKT_OK; } else { this_cpu_inc(*packet->rcd->dd->rcv_limit); return RCV_PKT_LIMIT; } } static inline int check_max_packet(struct hfi1_packet *packet, int thread) { int ret = RCV_PKT_OK; if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) ret = max_packet_exceeded(packet, thread); return ret; } static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread) { int ret; packet->rcd->dd->ctx0_seq_drop++; /* Set up for the next packet */ packet->rhqoff += packet->rsize; if (packet->rhqoff >= packet->maxcnt) packet->rhqoff = 0; packet->numpkt++; ret = check_max_packet(packet, thread); packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff + packet->rcd->rhf_offset; packet->rhf = rhf_to_cpu(packet->rhf_addr); return ret; } static void process_rcv_packet_napi(struct hfi1_packet *packet) { packet->etype = rhf_rcv_type(packet->rhf); /* total length */ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */ /* retrieve eager buffer details */ packet->etail = rhf_egr_index(packet->rhf); packet->ebuf = get_egrbuf(packet->rcd, packet->rhf, &packet->updegr); /* * Prefetch the contents of the eager buffer. It is * OK to send a negative length to prefetch_range(). * The +2 is the size of the RHF. */ prefetch_range(packet->ebuf, packet->tlen - ((packet->rcd->rcvhdrqentsize - (rhf_hdrq_offset(packet->rhf) + 2)) * 4)); packet->rcd->rhf_rcv_function_map[packet->etype](packet); packet->numpkt++; /* Set up for the next packet */ packet->rhqoff += packet->rsize; if (packet->rhqoff >= packet->maxcnt) packet->rhqoff = 0; packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff + packet->rcd->rhf_offset; packet->rhf = rhf_to_cpu(packet->rhf_addr); } static inline int process_rcv_packet(struct hfi1_packet *packet, int thread) { int ret; packet->etype = rhf_rcv_type(packet->rhf); /* total length */ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */ /* retrieve eager buffer details */ packet->ebuf = NULL; if (rhf_use_egr_bfr(packet->rhf)) { packet->etail = rhf_egr_index(packet->rhf); packet->ebuf = get_egrbuf(packet->rcd, packet->rhf, &packet->updegr); /* * Prefetch the contents of the eager buffer. It is * OK to send a negative length to prefetch_range(). * The +2 is the size of the RHF. */ prefetch_range(packet->ebuf, packet->tlen - ((get_hdrqentsize(packet->rcd) - (rhf_hdrq_offset(packet->rhf) + 2)) * 4)); } /* * Call a type specific handler for the packet. We * should be able to trust that etype won't be beyond * the range of valid indexes. If so something is really * wrong and we can probably just let things come * crashing down. There is no need to eat another * comparison in this performance critical code. 
*/ packet->rcd->rhf_rcv_function_map[packet->etype](packet); packet->numpkt++; /* Set up for the next packet */ packet->rhqoff += packet->rsize; if (packet->rhqoff >= packet->maxcnt) packet->rhqoff = 0; ret = check_max_packet(packet, thread); packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff + packet->rcd->rhf_offset; packet->rhf = rhf_to_cpu(packet->rhf_addr); return ret; } static inline void process_rcv_update(int last, struct hfi1_packet *packet) { /* * Update head regs etc., every 16 packets, if not last pkt, * to help prevent rcvhdrq overflows, when many packets * are processed and queue is nearly full. * Don't request an interrupt for intermediate updates. */ if (!last && !(packet->numpkt & 0xf)) { update_usrhead(packet->rcd, packet->rhqoff, packet->updegr, packet->etail, 0, 0); packet->updegr = 0; } packet->grh = NULL; } static inline void finish_packet(struct hfi1_packet *packet) { /* * Nothing we need to free for the packet. * * The only thing we need to do is a final update and call for an * interrupt */ update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr, packet->etail, rcv_intr_dynamic, packet->numpkt); } /* * handle_receive_interrupt_napi_fp - receive a packet * @rcd: the context * @budget: polling budget * * Called from interrupt handler for receive interrupt. * This is the fast path interrupt handler * when executing napi soft irq environment. */ int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget) { struct hfi1_packet packet; init_packet(rcd, &packet); if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) goto bail; while (packet.numpkt < budget) { process_rcv_packet_napi(&packet); if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) break; process_rcv_update(0, &packet); } hfi1_set_rcd_head(rcd, packet.rhqoff); bail: finish_packet(&packet); return packet.numpkt; } /* * Handle receive interrupts when using the no dma rtail option. */ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread) { int last = RCV_PKT_OK; struct hfi1_packet packet; init_packet(rcd, &packet); if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) { last = RCV_PKT_DONE; goto bail; } prescan_rxq(rcd, &packet); while (last == RCV_PKT_OK) { last = process_rcv_packet(&packet, thread); if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) last = RCV_PKT_DONE; process_rcv_update(last, &packet); } process_rcv_qp_work(&packet); hfi1_set_rcd_head(rcd, packet.rhqoff); bail: finish_packet(&packet); return last; } int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread) { u32 hdrqtail; int last = RCV_PKT_OK; struct hfi1_packet packet; init_packet(rcd, &packet); hdrqtail = get_rcvhdrtail(rcd); if (packet.rhqoff == hdrqtail) { last = RCV_PKT_DONE; goto bail; } smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ prescan_rxq(rcd, &packet); while (last == RCV_PKT_OK) { last = process_rcv_packet(&packet, thread); if (packet.rhqoff == hdrqtail) last = RCV_PKT_DONE; process_rcv_update(last, &packet); } process_rcv_qp_work(&packet); hfi1_set_rcd_head(rcd, packet.rhqoff); bail: finish_packet(&packet); return last; } static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) { u16 i; /* * For dynamically allocated kernel contexts (like vnic) switch * interrupt handler only for that context. Otherwise, switch * interrupt handler for all statically allocated kernel contexts. 
*/ if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) { hfi1_rcd_get(rcd); hfi1_set_fast(rcd); hfi1_rcd_put(rcd); return; } for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) { rcd = hfi1_rcd_get_by_index(dd, i); if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)) hfi1_set_fast(rcd); hfi1_rcd_put(rcd); } } void set_all_slowpath(struct hfi1_devdata *dd) { struct hfi1_ctxtdata *rcd; u16 i; /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */ for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) { rcd = hfi1_rcd_get_by_index(dd, i); if (!rcd) continue; if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic) rcd->do_interrupt = rcd->slow_handler; hfi1_rcd_put(rcd); } } static bool __set_armed_to_active(struct hfi1_packet *packet) { u8 etype = rhf_rcv_type(packet->rhf); u8 sc = SC15_PACKET; if (etype == RHF_RCV_TYPE_IB) { struct ib_header *hdr = hfi1_get_msgheader(packet->rcd, packet->rhf_addr); sc = hfi1_9B_get_sc5(hdr, packet->rhf); } else if (etype == RHF_RCV_TYPE_BYPASS) { struct hfi1_16b_header *hdr = hfi1_get_16B_header( packet->rcd, packet->rhf_addr); sc = hfi1_16B_get_sc(hdr); } if (sc != SC15_PACKET) { int hwstate = driver_lstate(packet->rcd->ppd); struct work_struct *lsaw = &packet->rcd->ppd->linkstate_active_work; if (hwstate != IB_PORT_ACTIVE) { dd_dev_info(packet->rcd->dd, "Unexpected link state %s\n", opa_lstate_name(hwstate)); return false; } queue_work(packet->rcd->ppd->link_wq, lsaw); return true; } return false; } /** * set_armed_to_active - the fast path for armed to active * @packet: the packet structure * * Return true if packet processing needs to bail. */ static bool set_armed_to_active(struct hfi1_packet *packet) { if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED)) return false; return __set_armed_to_active(packet); } /* * handle_receive_interrupt - receive a packet * @rcd: the context * * Called from interrupt handler for errors or receive interrupt. * This is the slow path interrupt handler. */ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) { struct hfi1_devdata *dd = rcd->dd; u32 hdrqtail; int needset, last = RCV_PKT_OK; struct hfi1_packet packet; int skip_pkt = 0; if (!rcd->rcvhdrq) return RCV_PKT_OK; /* Control context will always use the slow path interrupt handler */ needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1; init_packet(rcd, &packet); if (!get_dma_rtail_setting(rcd)) { if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) { last = RCV_PKT_DONE; goto bail; } hdrqtail = 0; } else { hdrqtail = get_rcvhdrtail(rcd); if (packet.rhqoff == hdrqtail) { last = RCV_PKT_DONE; goto bail; } smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ /* * Control context can potentially receive an invalid * rhf. Drop such packets. */ if (rcd->ctxt == HFI1_CTRL_CTXT) if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) skip_pkt = 1; } prescan_rxq(rcd, &packet); while (last == RCV_PKT_OK) { if (hfi1_need_drop(dd)) { /* On to the next packet */ packet.rhqoff += packet.rsize; packet.rhf_addr = (__le32 *)rcd->rcvhdrq + packet.rhqoff + rcd->rhf_offset; packet.rhf = rhf_to_cpu(packet.rhf_addr); } else if (skip_pkt) { last = skip_rcv_packet(&packet, thread); skip_pkt = 0; } else { if (set_armed_to_active(&packet)) goto bail; last = process_rcv_packet(&packet, thread); } if (!get_dma_rtail_setting(rcd)) { if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) last = RCV_PKT_DONE; } else { if (packet.rhqoff == hdrqtail) last = RCV_PKT_DONE; /* * Control context can potentially receive an invalid * rhf. Drop such packets. 
*/ if (rcd->ctxt == HFI1_CTRL_CTXT) { bool lseq; lseq = hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)); if (!last && lseq) skip_pkt = 1; } } if (needset) { needset = false; set_all_fastpath(dd, rcd); } process_rcv_update(last, &packet); } process_rcv_qp_work(&packet); hfi1_set_rcd_head(rcd, packet.rhqoff); bail: /* * Always write head at end, and setup rcv interrupt, even * if no packets were processed. */ finish_packet(&packet); return last; } /* * handle_receive_interrupt_napi_sp - receive a packet * @rcd: the context * @budget: polling budget * * Called from interrupt handler for errors or receive interrupt. * This is the slow path interrupt handler * when executing napi soft irq environment. */ int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget) { struct hfi1_devdata *dd = rcd->dd; int last = RCV_PKT_OK; bool needset = true; struct hfi1_packet packet; init_packet(rcd, &packet); if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) goto bail; while (last != RCV_PKT_DONE && packet.numpkt < budget) { if (hfi1_need_drop(dd)) { /* On to the next packet */ packet.rhqoff += packet.rsize; packet.rhf_addr = (__le32 *)rcd->rcvhdrq + packet.rhqoff + rcd->rhf_offset; packet.rhf = rhf_to_cpu(packet.rhf_addr); } else { if (set_armed_to_active(&packet)) goto bail; process_rcv_packet_napi(&packet); } if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) last = RCV_PKT_DONE; if (needset) { needset = false; set_all_fastpath(dd, rcd); } process_rcv_update(last, &packet); } hfi1_set_rcd_head(rcd, packet.rhqoff); bail: /* * Always write head at end, and setup rcv interrupt, even * if no packets were processed. */ finish_packet(&packet); return packet.numpkt; } /* * We may discover in the interrupt that the hardware link state has * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet), * and we need to update the driver's notion of the link state. We cannot * run set_link_state from interrupt context, so we queue this function on * a workqueue. * * We delay the regular interrupt processing until after the state changes * so that the link will be in the correct state by the time any application * we wake up attempts to send a reply to any message it received. * (Subsequent receive interrupts may possibly force the wakeup before we * update the link state.) * * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues, * so we're safe from use-after-free of the rcd. */ void receive_interrupt_work(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, linkstate_active_work); struct hfi1_devdata *dd = ppd->dd; struct hfi1_ctxtdata *rcd; u16 i; /* Received non-SC15 packet implies neighbor_normal */ ppd->neighbor_normal = 1; set_link_state(ppd, HLS_UP_ACTIVE); /* * Interrupt all statically allocated kernel contexts that could * have had an interrupt during auto activation. */ for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) { rcd = hfi1_rcd_get_by_index(dd, i); if (rcd) force_recv_intr(rcd); hfi1_rcd_put(rcd); } } /* * Convert a given MTU size to the on-wire MAD packet enumeration. * Return -1 if the size is invalid. 
*/ int mtu_to_enum(u32 mtu, int default_if_bad) { switch (mtu) { case 0: return OPA_MTU_0; case 256: return OPA_MTU_256; case 512: return OPA_MTU_512; case 1024: return OPA_MTU_1024; case 2048: return OPA_MTU_2048; case 4096: return OPA_MTU_4096; case 8192: return OPA_MTU_8192; case 10240: return OPA_MTU_10240; } return default_if_bad; } u16 enum_to_mtu(int mtu) { switch (mtu) { case OPA_MTU_0: return 0; case OPA_MTU_256: return 256; case OPA_MTU_512: return 512; case OPA_MTU_1024: return 1024; case OPA_MTU_2048: return 2048; case OPA_MTU_4096: return 4096; case OPA_MTU_8192: return 8192; case OPA_MTU_10240: return 10240; default: return 0xffff; } } /* * set_mtu - set the MTU * @ppd: the per port data * * We can handle "any" incoming size, the issue here is whether we * need to restrict our outgoing size. We do not deal with what happens * to programs that are already running when the size changes. */ int set_mtu(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; int i, drain, ret = 0, is_up = 0; ppd->ibmtu = 0; for (i = 0; i < ppd->vls_supported; i++) if (ppd->ibmtu < dd->vld[i].mtu) ppd->ibmtu = dd->vld[i].mtu; ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd); mutex_lock(&ppd->hls_lock); if (ppd->host_link_state == HLS_UP_INIT || ppd->host_link_state == HLS_UP_ARMED || ppd->host_link_state == HLS_UP_ACTIVE) is_up = 1; drain = !is_ax(dd) && is_up; if (drain) /* * MTU is specified per-VL. To ensure that no packet gets * stuck (due, e.g., to the MTU for the packet's VL being * reduced), empty the per-VL FIFOs before adjusting MTU. */ ret = stop_drain_data_vls(dd); if (ret) { dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n", __func__); goto err; } hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0); if (drain) open_fill_data_vls(dd); /* reopen all VLs */ err: mutex_unlock(&ppd->hls_lock); return ret; } int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc) { struct hfi1_devdata *dd = ppd->dd; ppd->lid = lid; ppd->lmc = lmc; hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0); dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid); return 0; } void shutdown_led_override(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; /* * This pairs with the memory barrier in hfi1_start_led_override to * ensure that we read the correct state of LED beaconing represented * by led_override_timer_active */ smp_rmb(); if (atomic_read(&ppd->led_override_timer_active)) { del_timer_sync(&ppd->led_override_timer); atomic_set(&ppd->led_override_timer_active, 0); /* Ensure the atomic_set is visible to all CPUs */ smp_wmb(); } /* Hand control of the LED to the DC for normal operation */ write_csr(dd, DCC_CFG_LED_CNTRL, 0); } static void run_led_override(struct timer_list *t) { struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer); struct hfi1_devdata *dd = ppd->dd; unsigned long timeout; int phase_idx; if (!(dd->flags & HFI1_INITTED)) return; phase_idx = ppd->led_override_phase & 1; setextled(dd, phase_idx); timeout = ppd->led_override_vals[phase_idx]; /* Set up for next phase */ ppd->led_override_phase = !ppd->led_override_phase; mod_timer(&ppd->led_override_timer, jiffies + timeout); } /* * To have the LED blink in a particular pattern, provide timeon and timeoff * in milliseconds. 
* To turn off custom blinking and return to normal operation, use * shutdown_led_override() */ void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, unsigned int timeoff) { if (!(ppd->dd->flags & HFI1_INITTED)) return; /* Convert to jiffies for direct use in timer */ ppd->led_override_vals[0] = msecs_to_jiffies(timeoff); ppd->led_override_vals[1] = msecs_to_jiffies(timeon); /* Arbitrarily start from LED on phase */ ppd->led_override_phase = 1; /* * If the timer has not already been started, do so. Use a "quick" * timeout so the handler will be called soon to look at our request. */ if (!timer_pending(&ppd->led_override_timer)) { timer_setup(&ppd->led_override_timer, run_led_override, 0); ppd->led_override_timer.expires = jiffies + 1; add_timer(&ppd->led_override_timer); atomic_set(&ppd->led_override_timer_active, 1); /* Ensure the atomic_set is visible to all CPUs */ smp_wmb(); } } /** * hfi1_reset_device - reset the chip if possible * @unit: the device to reset * * Whether or not reset is successful, we attempt to re-initialize the chip * (that is, much like a driver unload/reload). We clear the INITTED flag * so that the various entry points will fail until we reinitialize. For * now, we only allow this if no user contexts are open that use chip resources */ int hfi1_reset_device(int unit) { int ret; struct hfi1_devdata *dd = hfi1_lookup(unit); struct hfi1_pportdata *ppd; int pidx; if (!dd) { ret = -ENODEV; goto bail; } dd_dev_info(dd, "Reset on unit %u requested\n", unit); if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) { dd_dev_info(dd, "Invalid unit number %u or not initialized or not present\n", unit); ret = -ENXIO; goto bail; } /* If there are any user/vnic contexts, we cannot reset */ mutex_lock(&hfi1_mutex); if (dd->rcd) if (hfi1_stats.sps_ctxts) { mutex_unlock(&hfi1_mutex); ret = -EBUSY; goto bail; } mutex_unlock(&hfi1_mutex); for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; shutdown_led_override(ppd); } if (dd->flags & HFI1_HAS_SEND_DMA) sdma_exit(dd); hfi1_reset_cpu_counters(dd); ret = hfi1_init(dd, 1); if (ret) dd_dev_err(dd, "Reinitialize unit %u after reset failed with %d\n", unit, ret); else dd_dev_info(dd, "Reinitialized unit %u after resetting\n", unit); bail: return ret; } static inline void hfi1_setup_ib_header(struct hfi1_packet *packet) { packet->hdr = (struct hfi1_ib_message_header *) hfi1_get_msgheader(packet->rcd, packet->rhf_addr); packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr; } static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet) { struct hfi1_pportdata *ppd = packet->rcd->ppd; /* slid and dlid cannot be 0 */ if ((!packet->slid) || (!packet->dlid)) return -EINVAL; /* Compare port lid with incoming packet dlid */ if ((!(hfi1_is_16B_mcast(packet->dlid))) && (packet->dlid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) { if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid) return -EINVAL; } /* No multicast packets with SC15 */ if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF)) return -EINVAL; /* Packets with permissive DLID always on SC15 */ if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)) && (packet->sc != 0xF)) return -EINVAL; return 0; } static int hfi1_setup_9B_packet(struct hfi1_packet *packet) { struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); struct ib_header *hdr; u8 lnh; hfi1_setup_ib_header(packet); hdr = packet->hdr; lnh = ib_get_lnh(hdr); if (lnh == HFI1_LRH_BTH) { packet->ohdr = &hdr->u.oth; packet->grh = NULL; } else if (lnh 
== HFI1_LRH_GRH) { u32 vtf; packet->ohdr = &hdr->u.l.oth; packet->grh = &hdr->u.l.grh; if (packet->grh->next_hdr != IB_GRH_NEXT_HDR) goto drop; vtf = be32_to_cpu(packet->grh->version_tclass_flow); if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) goto drop; } else { goto drop; } /* Query commonly used fields from packet header */ packet->payload = packet->ebuf; packet->opcode = ib_bth_get_opcode(packet->ohdr); packet->slid = ib_get_slid(hdr); packet->dlid = ib_get_dlid(hdr); if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE)))) packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) - be16_to_cpu(IB_MULTICAST_LID_BASE); packet->sl = ib_get_sl(hdr); packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf); packet->pad = ib_bth_get_pad(packet->ohdr); packet->extra_byte = 0; packet->pkey = ib_bth_get_pkey(packet->ohdr); packet->migrated = ib_bth_is_migration(packet->ohdr); return 0; drop: ibp->rvp.n_pkt_drops++; return -EINVAL; } static int hfi1_setup_bypass_packet(struct hfi1_packet *packet) { /* * Bypass packets have a different header/payload split * compared to an IB packet. * Current split is set such that 16 bytes of the actual * header is in the header buffer and the remining is in * the eager buffer. We chose 16 since hfi1 driver only * supports 16B bypass packets and we will be able to * receive the entire LRH with such a split. */ struct hfi1_ctxtdata *rcd = packet->rcd; struct hfi1_pportdata *ppd = rcd->ppd; struct hfi1_ibport *ibp = &ppd->ibport_data; u8 l4; packet->hdr = (struct hfi1_16b_header *) hfi1_get_16B_header(packet->rcd, packet->rhf_addr); l4 = hfi1_16B_get_l4(packet->hdr); if (l4 == OPA_16B_L4_IB_LOCAL) { packet->ohdr = packet->ebuf; packet->grh = NULL; packet->opcode = ib_bth_get_opcode(packet->ohdr); packet->pad = hfi1_16B_bth_get_pad(packet->ohdr); /* hdr_len_by_opcode already has an IB LRH factored in */ packet->hlen = hdr_len_by_opcode[packet->opcode] + (LRH_16B_BYTES - LRH_9B_BYTES); packet->migrated = opa_bth_is_migration(packet->ohdr); } else if (l4 == OPA_16B_L4_IB_GLOBAL) { u32 vtf; u8 grh_len = sizeof(struct ib_grh); packet->ohdr = packet->ebuf + grh_len; packet->grh = packet->ebuf; packet->opcode = ib_bth_get_opcode(packet->ohdr); packet->pad = hfi1_16B_bth_get_pad(packet->ohdr); /* hdr_len_by_opcode already has an IB LRH factored in */ packet->hlen = hdr_len_by_opcode[packet->opcode] + (LRH_16B_BYTES - LRH_9B_BYTES) + grh_len; packet->migrated = opa_bth_is_migration(packet->ohdr); if (packet->grh->next_hdr != IB_GRH_NEXT_HDR) goto drop; vtf = be32_to_cpu(packet->grh->version_tclass_flow); if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) goto drop; } else if (l4 == OPA_16B_L4_FM) { packet->mgmt = packet->ebuf; packet->ohdr = NULL; packet->grh = NULL; packet->opcode = IB_OPCODE_UD_SEND_ONLY; packet->pad = OPA_16B_L4_FM_PAD; packet->hlen = OPA_16B_L4_FM_HLEN; packet->migrated = false; } else { goto drop; } /* Query commonly used fields from packet header */ packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES; packet->slid = hfi1_16B_get_slid(packet->hdr); packet->dlid = hfi1_16B_get_dlid(packet->hdr); if (unlikely(hfi1_is_16B_mcast(packet->dlid))) packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) - opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B); packet->sc = hfi1_16B_get_sc(packet->hdr); packet->sl = ibp->sc_to_sl[packet->sc]; packet->extra_byte = SIZE_OF_LT; packet->pkey = hfi1_16B_get_pkey(packet->hdr); if (hfi1_bypass_ingress_pkt_check(packet)) goto drop; return 0; drop: hfi1_cdbg(PKT, 
"%s: packet dropped", __func__); ibp->rvp.n_pkt_drops++; return -EINVAL; } static void show_eflags_errs(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; u32 rte = rhf_rcv_type_err(packet->rhf); dd_dev_err(rcd->dd, "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n", rcd->ctxt, packet->rhf, packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "", packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "", packet->rhf & RHF_DC_ERR ? "dc " : "", packet->rhf & RHF_TID_ERR ? "tid " : "", packet->rhf & RHF_LEN_ERR ? "len " : "", packet->rhf & RHF_ECC_ERR ? "ecc " : "", packet->rhf & RHF_ICRC_ERR ? "icrc " : "", rte); } void handle_eflags(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; rcv_hdrerr(rcd, rcd->ppd, packet); if (rhf_err_flags(packet->rhf)) show_eflags_errs(packet); } static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet) { struct hfi1_ibport *ibp; struct net_device *netdev; struct hfi1_ctxtdata *rcd = packet->rcd; struct napi_struct *napi = rcd->napi; struct sk_buff *skb; struct hfi1_netdev_rxq *rxq = container_of(napi, struct hfi1_netdev_rxq, napi); u32 extra_bytes; u32 tlen, qpnum; bool do_work, do_cnp; trace_hfi1_rcvhdr(packet); hfi1_setup_ib_header(packet); packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth; packet->grh = NULL; if (unlikely(rhf_err_flags(packet->rhf))) { handle_eflags(packet); return; } qpnum = ib_bth_get_qpn(packet->ohdr); netdev = hfi1_netdev_get_data(rcd->dd, qpnum); if (!netdev) goto drop_no_nd; trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); trace_ctxt_rsm_hist(rcd->ctxt); /* handle congestion notifications */ do_work = hfi1_may_ecn(packet); if (unlikely(do_work)) { do_cnp = (packet->opcode != IB_OPCODE_CNP); (void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp, packet, do_cnp); } /* * We have split point after last byte of DETH * lets strip padding and CRC and ICRC. * tlen is whole packet len so we need to * subtract header size as well. */ tlen = packet->tlen; extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) + packet->hlen; if (unlikely(tlen < extra_bytes)) goto drop; tlen -= extra_bytes; skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf); if (unlikely(!skb)) goto drop; dev_sw_netstats_rx_add(netdev, skb->len); skb->dev = netdev; skb->pkt_type = PACKET_HOST; netif_receive_skb(skb); return; drop: ++netdev->stats.rx_dropped; drop_no_nd: ibp = rcd_to_iport(packet->rcd); ++ibp->rvp.n_pkt_drops; } /* * The following functions are called by the interrupt handler. They are type * specific handlers for each packet type. */ static void process_receive_ib(struct hfi1_packet *packet) { if (hfi1_setup_9B_packet(packet)) return; if (unlikely(hfi1_dbg_should_fault_rx(packet))) return; trace_hfi1_rcvhdr(packet); if (unlikely(rhf_err_flags(packet->rhf))) { handle_eflags(packet); return; } hfi1_ib_rcv(packet); } static void process_receive_bypass(struct hfi1_packet *packet) { struct hfi1_devdata *dd = packet->rcd->dd; if (hfi1_setup_bypass_packet(packet)) return; trace_hfi1_rcvhdr(packet); if (unlikely(rhf_err_flags(packet->rhf))) { handle_eflags(packet); return; } if (hfi1_16B_get_l2(packet->hdr) == 0x2) { hfi1_16B_rcv(packet); } else { dd_dev_err(dd, "Bypass packets other than 16B are not supported in normal operation. 
Dropping\n"); incr_cntr64(&dd->sw_rcv_bypass_packet_errors); if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) { u64 *flits = packet->ebuf; if (flits && !(packet->rhf & RHF_LEN_ERR)) { dd->err_info_rcvport.packet_flit1 = flits[0]; dd->err_info_rcvport.packet_flit2 = packet->tlen > sizeof(flits[0]) ? flits[1] : 0; } dd->err_info_rcvport.status_and_code |= (OPA_EI_STATUS_SMASK | BAD_L2_ERR); } } } static void process_receive_error(struct hfi1_packet *packet) { /* KHdrHCRCErr -- KDETH packet with a bad HCRC */ if (unlikely( hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) && (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR || packet->rhf & RHF_DC_ERR))) return; hfi1_setup_ib_header(packet); handle_eflags(packet); if (unlikely(rhf_err_flags(packet->rhf))) dd_dev_err(packet->rcd->dd, "Unhandled error packet received. Dropping.\n"); } static void kdeth_process_expected(struct hfi1_packet *packet) { hfi1_setup_9B_packet(packet); if (unlikely(hfi1_dbg_should_fault_rx(packet))) return; if (unlikely(rhf_err_flags(packet->rhf))) { struct hfi1_ctxtdata *rcd = packet->rcd; if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet)) return; } hfi1_kdeth_expected_rcv(packet); } static void kdeth_process_eager(struct hfi1_packet *packet) { hfi1_setup_9B_packet(packet); if (unlikely(hfi1_dbg_should_fault_rx(packet))) return; trace_hfi1_rcvhdr(packet); if (unlikely(rhf_err_flags(packet->rhf))) { struct hfi1_ctxtdata *rcd = packet->rcd; show_eflags_errs(packet); if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet)) return; } hfi1_kdeth_eager_rcv(packet); } static void process_receive_invalid(struct hfi1_packet *packet) { dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n", rhf_rcv_type(packet->rhf)); } #define HFI1_RCVHDR_DUMP_MAX 5 void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd) { struct hfi1_packet packet; struct ps_mdata mdata; int i; seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n", rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd), get_dma_rtail_setting(rcd) ? 
"dma_rtail" : "nodma_rtail", read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL), read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS), read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) & RCV_HDR_HEAD_HEAD_MASK, read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL), rcd->head); init_packet(rcd, &packet); init_ps_mdata(&mdata, &packet); for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) { __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head + rcd->rhf_offset; struct ib_header *hdr; u64 rhf = rhf_to_cpu(rhf_addr); u32 etype = rhf_rcv_type(rhf), qpn; u8 opcode; u32 psn; u8 lnh; if (ps_done(&mdata, rhf, rcd)) break; if (ps_skip(&mdata, rhf, rcd)) goto next; if (etype > RHF_RCV_TYPE_IB) goto next; packet.hdr = hfi1_get_msgheader(rcd, rhf_addr); hdr = packet.hdr; lnh = be16_to_cpu(hdr->lrh[0]) & 3; if (lnh == HFI1_LRH_BTH) packet.ohdr = &hdr->u.oth; else if (lnh == HFI1_LRH_GRH) packet.ohdr = &hdr->u.l.oth; else goto next; /* just in case */ opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24); qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK; psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2])); seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n", mdata.ps_head, opcode, qpn, psn); next: update_ps_mdata(&mdata, rcd); } } const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = { [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected, [RHF_RCV_TYPE_EAGER] = kdeth_process_eager, [RHF_RCV_TYPE_IB] = process_receive_ib, [RHF_RCV_TYPE_ERROR] = process_receive_error, [RHF_RCV_TYPE_BYPASS] = process_receive_bypass, [RHF_RCV_TYPE_INVALID5] = process_receive_invalid, [RHF_RCV_TYPE_INVALID6] = process_receive_invalid, [RHF_RCV_TYPE_INVALID7] = process_receive_invalid, }; const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = { [RHF_RCV_TYPE_EXPECTED] = process_receive_invalid, [RHF_RCV_TYPE_EAGER] = process_receive_invalid, [RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv, [RHF_RCV_TYPE_ERROR] = process_receive_error, [RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv, [RHF_RCV_TYPE_INVALID5] = process_receive_invalid, [RHF_RCV_TYPE_INVALID6] = process_receive_invalid, [RHF_RCV_TYPE_INVALID7] = process_receive_invalid, };
linux-master
drivers/infiniband/hw/hfi1/driver.c
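/*
 * Editor's note: the block below is an illustrative userspace sketch added
 * alongside this dataset row; it is not part of driver.c. It models the
 * batched head-update idea in process_rcv_update() above, where the receive
 * head is published only every 16 packets (or at the end of the poll) so the
 * hardware is not poked once per packet. All names here are hypothetical.
 */
#include <stdio.h>

#define RING_ENTRIES 64

struct rx_ring {
	unsigned int head;	/* consumer index, published lazily */
	unsigned int numpkt;	/* packets handled in this poll */
};

/* Stand-in for update_usrhead(): pretend to write the head CSR. */
static void publish_head(const struct rx_ring *r, int request_intr)
{
	printf("publish head=%u numpkt=%u intr=%d\n",
	       r->head, r->numpkt, request_intr);
}

static void consume_one(struct rx_ring *r)
{
	r->head = (r->head + 1) % RING_ENTRIES;
	r->numpkt++;
}

int main(void)
{
	struct rx_ring r = { 0, 0 };
	int i, last;

	for (i = 0; i < 40; i++) {
		consume_one(&r);
		last = (i == 39);
		/*
		 * Mirrors "if (!last && !(packet->numpkt & 0xf))": publish an
		 * intermediate update every 16 packets, without an interrupt.
		 */
		if (!last && !(r.numpkt & 0xf))
			publish_head(&r, 0);
	}
	/* The final update always happens and re-arms the interrupt. */
	publish_head(&r, 1);
	return 0;
}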
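/*
 * Editor's note: another illustrative sketch, not part of the original file.
 * mtu_to_enum() and enum_to_mtu() in driver.c are a pair of switch statements
 * mapping byte sizes to the on-wire MAD enumeration and back. The same mapping
 * can be expressed with one table, which makes the round trip easy to test in
 * userspace. The numeric codes below are local to this example and are not
 * claimed to be the kernel's OPA_MTU_* values.
 */
#include <stdio.h>

struct mtu_pair {
	unsigned int bytes;
	int code;
};

static const struct mtu_pair mtu_table[] = {
	{ 0, 0 }, { 256, 1 }, { 512, 2 }, { 1024, 3 },
	{ 2048, 4 }, { 4096, 5 }, { 8192, 6 }, { 10240, 7 },
};

#define MTU_TABLE_LEN (sizeof(mtu_table) / sizeof(mtu_table[0]))

/* Like mtu_to_enum(): unknown sizes fall back to a caller-chosen default. */
static int sketch_mtu_to_enum(unsigned int bytes, int default_if_bad)
{
	size_t i;

	for (i = 0; i < MTU_TABLE_LEN; i++)
		if (mtu_table[i].bytes == bytes)
			return mtu_table[i].code;
	return default_if_bad;
}

/* Like enum_to_mtu(): unknown codes map to 0xffff. */
static unsigned int sketch_enum_to_mtu(int code)
{
	size_t i;

	for (i = 0; i < MTU_TABLE_LEN; i++)
		if (mtu_table[i].code == code)
			return mtu_table[i].bytes;
	return 0xffff;
}

int main(void)
{
	int code = sketch_mtu_to_enum(4096, -1);

	printf("4096 -> %d -> %u\n", code, sketch_enum_to_mtu(code));
	printf("3000 -> %d (invalid size)\n", sketch_mtu_to_enum(3000, -1));
	return 0;
}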
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/bitmap.h> #include "hfi.h" #include "common.h" #include "sdma.h" #define LINK_UP_DELAY 500 /* in microseconds */ static void set_mgmt_allowed(struct hfi1_pportdata *ppd) { u32 frame; struct hfi1_devdata *dd = ppd->dd; if (ppd->neighbor_type == NEIGHBOR_TYPE_HFI) { ppd->mgmt_allowed = 1; } else { read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame); ppd->mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK; } } /* * Our neighbor has indicated that we are allowed to act as a fabric * manager, so place the full management partition key in the second * (0-based) pkey array position. Note that we should already have * the limited management partition key in array element 1, and also * that the port is not yet up when add_full_mgmt_pkey() is invoked. */ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */ if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY))) dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n", __func__, ppd->pkeys[2], FULL_MGMT_P_KEY); ppd->pkeys[2] = FULL_MGMT_P_KEY; (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); hfi1_event_pkey_change(ppd->dd, ppd->port); } /** * format_hwmsg - format a single hwerror message * @msg: message buffer * @msgl: length of message buffer * @hwmsg: message to add to message buffer */ static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg) { strlcat(msg, "[", msgl); strlcat(msg, hwmsg, msgl); strlcat(msg, "]", msgl); } /** * hfi1_format_hwerrors - format hardware error messages for display * @hwerrs: hardware errors bit vector * @hwerrmsgs: hardware error descriptions * @nhwerrmsgs: number of hwerrmsgs * @msg: message buffer * @msgl: message buffer length */ void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs, size_t nhwerrmsgs, char *msg, size_t msgl) { int i; for (i = 0; i < nhwerrmsgs; i++) if (hwerrs & hwerrmsgs[i].mask) format_hwmsg(msg, msgl, hwerrmsgs[i].msg); } static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev) { struct ib_event event; struct hfi1_devdata *dd = ppd->dd; /* * Only call ib_dispatch_event() if the IB device has been * registered. HFI1_INITED is set iff the driver has successfully * registered with the IB core. */ if (!(dd->flags & HFI1_INITTED)) return; event.device = &dd->verbs_dev.rdi.ibdev; event.element.port_num = ppd->port; event.event = ev; ib_dispatch_event(&event); } /** * handle_linkup_change - finish linkup/down state changes * @dd: valid device * @linkup: link state information * * Handle a linkup or link down notification. * The HW needs time to finish its link up state change. Give it that chance. * * This is called outside an interrupt. * */ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) { struct hfi1_pportdata *ppd = &dd->pport[0]; enum ib_event_type ev; if (!(ppd->linkup ^ !!linkup)) return; /* no change, nothing to do */ if (linkup) { /* * Quick linkup and all link up on the simulator does not * trigger or implement: * - VerifyCap interrupt * - VerifyCap frames * But rather moves directly to LinkUp. * * Do the work of the VerifyCap interrupt handler, * handle_verify_cap(), but do not try moving the state to * LinkUp as we are already there. 
* * NOTE: This uses this device's vAU, vCU, and vl15_init for * the remote values. Both sides must be using the values. */ if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { set_up_vau(dd, dd->vau); set_up_vl15(dd, dd->vl15_init); assign_remote_cm_au_table(dd, dd->vcu); } ppd->neighbor_guid = read_csr(dd, DC_DC8051_STS_REMOTE_GUID); ppd->neighbor_type = read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) & DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK; ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) & DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK; ppd->neighbor_fm_security = read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) & DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK; dd_dev_info(dd, "Neighbor Guid %llx, Type %d, Port Num %d\n", ppd->neighbor_guid, ppd->neighbor_type, ppd->neighbor_port_number); /* HW needs LINK_UP_DELAY to settle, give it that chance */ udelay(LINK_UP_DELAY); /* * 'MgmtAllowed' information, which is exchanged during * LNI, is available at this point. */ set_mgmt_allowed(ppd); if (ppd->mgmt_allowed) add_full_mgmt_pkey(ppd); /* physical link went up */ ppd->linkup = 1; ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE); /* link widths are not available until the link is fully up */ get_linkup_link_widths(ppd); } else { /* physical link went down */ ppd->linkup = 0; /* clear HW details of the previous connection */ ppd->actual_vls_operational = 0; reset_link_credits(dd); /* freeze after a link down to guarantee a clean egress */ start_freeze_handling(ppd, FREEZE_SELF | FREEZE_LINK_DOWN); ev = IB_EVENT_PORT_ERR; hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LINKDOWN_BIT); /* if we are down, the neighbor is down */ ppd->neighbor_normal = 0; /* notify IB of the link change */ signal_ib_event(ppd, ev); } } /* * Handle receive or urgent interrupts for user contexts. This means a user * process was waiting for a packet to arrive, and didn't want to poll. */ void handle_user_interrupt(struct hfi1_ctxtdata *rcd) { struct hfi1_devdata *dd = rcd->dd; unsigned long flags; spin_lock_irqsave(&dd->uctxt_lock, flags); if (bitmap_empty(rcd->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) goto done; if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) { wake_up_interruptible(&rcd->wait); hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd); } else if (test_and_clear_bit(HFI1_CTXT_WAITING_URG, &rcd->event_flags)) { rcd->urgent++; wake_up_interruptible(&rcd->wait); } done: spin_unlock_irqrestore(&dd->uctxt_lock, flags); }
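/*
 * Editor's note: a standalone sketch, not kernel code, of the pattern used by
 * hfi1_format_hwerrors() above: walk a table of { bit mask, description }
 * pairs and append "[description]" for every bit set in a hardware error
 * vector. The table entries are invented for the example, and strlcat() is
 * replaced by an snprintf() helper because glibc does not provide it.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct hwerror_msg {
	uint64_t mask;
	const char *msg;
};

static const struct hwerror_msg msgs[] = {
	{ 1ull << 0, "pcie parity" },
	{ 1ull << 1, "sdma timeout" },
	{ 1ull << 5, "csr access" },
};

/* Append "[msg]" to buf, mirroring format_hwmsg()'s strlcat() calls. */
static void append_msg(char *buf, size_t len, const char *msg)
{
	size_t used = strlen(buf);

	if (used < len)
		snprintf(buf + used, len - used, "[%s]", msg);
}

static void format_hwerrors(uint64_t hwerrs, char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++)
		if (hwerrs & msgs[i].mask)
			append_msg(buf, len, msgs[i].msg);
}

int main(void)
{
	char buf[128] = "";

	format_hwerrors((1ull << 0) | (1ull << 5), buf, sizeof(buf));
	printf("hwerrs: %s\n", buf);	/* prints "[pcie parity][csr access]" */
	return 0;
}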
linux-master
drivers/infiniband/hw/hfi1/intr.c
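/*
 * Editor's note: illustrative only, not part of intr.c. handle_linkup_change()
 * above opens with "if (!(ppd->linkup ^ !!linkup)) return;": the incoming
 * value is normalised to 0/1 with !! and XORed against the cached state, so
 * the body runs only on a real up/down edge. A minimal userspace model of
 * that idiom follows, with hypothetical names.
 */
#include <stdio.h>

struct port_state {
	unsigned int linkup;	/* cached 0/1 link state */
};

static void on_link_change(struct port_state *p, unsigned int linkup_raw)
{
	/* No edge: the cached state already matches the normalised input. */
	if (!(p->linkup ^ !!linkup_raw))
		return;

	p->linkup = !!linkup_raw;
	printf("link went %s\n", p->linkup ? "up" : "down");
}

int main(void)
{
	struct port_state p = { 0 };

	on_link_change(&p, 0);	/* already down, silent */
	on_link_change(&p, 4);	/* non-zero raw value: "link went up" */
	on_link_change(&p, 4);	/* still up, silent */
	on_link_change(&p, 0);	/* "link went down" */
	return 0;
}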
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. */ #include <linux/topology.h> #include <linux/cpumask.h> #include <linux/interrupt.h> #include <linux/numa.h> #include "hfi.h" #include "affinity.h" #include "sdma.h" #include "trace.h" struct hfi1_affinity_node_list node_affinity = { .list = LIST_HEAD_INIT(node_affinity.list), .lock = __MUTEX_INITIALIZER(node_affinity.lock) }; /* Name of IRQ types, indexed by enum irq_type */ static const char * const irq_type_names[] = { "SDMA", "RCVCTXT", "NETDEVCTXT", "GENERAL", "OTHER", }; /* Per NUMA node count of HFI devices */ static unsigned int *hfi1_per_node_cntr; static inline void init_cpu_mask_set(struct cpu_mask_set *set) { cpumask_clear(&set->mask); cpumask_clear(&set->used); set->gen = 0; } /* Increment generation of CPU set if needed */ static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set) { if (cpumask_equal(&set->mask, &set->used)) { /* * We've used up all the CPUs, bump up the generation * and reset the 'used' map */ set->gen++; cpumask_clear(&set->used); } } static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set) { if (cpumask_empty(&set->used) && set->gen) { set->gen--; cpumask_copy(&set->used, &set->mask); } } /* Get the first CPU from the list of unused CPUs in a CPU set data structure */ static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff) { int cpu; if (!diff || !set) return -EINVAL; _cpu_mask_set_gen_inc(set); /* Find out CPUs left in CPU mask */ cpumask_andnot(diff, &set->mask, &set->used); cpu = cpumask_first(diff); if (cpu >= nr_cpu_ids) /* empty */ cpu = -EINVAL; else cpumask_set_cpu(cpu, &set->used); return cpu; } static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu) { if (!set) return; cpumask_clear_cpu(cpu, &set->used); _cpu_mask_set_gen_dec(set); } /* Initialize non-HT cpu cores mask */ void init_real_cpu_mask(void) { int possible, curr_cpu, i, ht; cpumask_clear(&node_affinity.real_cpu_mask); /* Start with cpu online mask as the real cpu mask */ cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask); /* * Remove HT cores from the real cpu mask. Do this in two steps below. */ possible = cpumask_weight(&node_affinity.real_cpu_mask); ht = cpumask_weight(topology_sibling_cpumask( cpumask_first(&node_affinity.real_cpu_mask))); /* * Step 1. Skip over the first N HT siblings and use them as the * "real" cores. Assumes that HT cores are not enumerated in * succession (except in the single core case). */ curr_cpu = cpumask_first(&node_affinity.real_cpu_mask); for (i = 0; i < possible / ht; i++) curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask); /* * Step 2. Remove the remaining HT siblings. Use cpumask_next() to * skip any gaps. */ for (; i < possible; i++) { cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask); curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask); } } int node_affinity_init(void) { int node; struct pci_dev *dev = NULL; const struct pci_device_id *ids = hfi1_pci_tbl; cpumask_clear(&node_affinity.proc.used); cpumask_copy(&node_affinity.proc.mask, cpu_online_mask); node_affinity.proc.gen = 0; node_affinity.num_core_siblings = cpumask_weight(topology_sibling_cpumask( cpumask_first(&node_affinity.proc.mask) )); node_affinity.num_possible_nodes = num_possible_nodes(); node_affinity.num_online_nodes = num_online_nodes(); node_affinity.num_online_cpus = num_online_cpus(); /* * The real cpu mask is part of the affinity struct but it has to be * initialized early. 
It is needed to calculate the number of user * contexts in set_up_context_variables(). */ init_real_cpu_mask(); hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes, sizeof(*hfi1_per_node_cntr), GFP_KERNEL); if (!hfi1_per_node_cntr) return -ENOMEM; while (ids->vendor) { dev = NULL; while ((dev = pci_get_device(ids->vendor, ids->device, dev))) { node = pcibus_to_node(dev->bus); if (node < 0) goto out; hfi1_per_node_cntr[node]++; } ids++; } return 0; out: /* * Invalid PCI NUMA node information found, note it, and populate * our database 1:1. */ pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n"); pr_err("HFI: System BIOS may need to be upgraded\n"); for (node = 0; node < node_affinity.num_possible_nodes; node++) hfi1_per_node_cntr[node] = 1; pci_dev_put(dev); return 0; } static void node_affinity_destroy(struct hfi1_affinity_node *entry) { free_percpu(entry->comp_vect_affinity); kfree(entry); } void node_affinity_destroy_all(void) { struct list_head *pos, *q; struct hfi1_affinity_node *entry; mutex_lock(&node_affinity.lock); list_for_each_safe(pos, q, &node_affinity.list) { entry = list_entry(pos, struct hfi1_affinity_node, list); list_del(pos); node_affinity_destroy(entry); } mutex_unlock(&node_affinity.lock); kfree(hfi1_per_node_cntr); } static struct hfi1_affinity_node *node_affinity_allocate(int node) { struct hfi1_affinity_node *entry; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return NULL; entry->node = node; entry->comp_vect_affinity = alloc_percpu(u16); INIT_LIST_HEAD(&entry->list); return entry; } /* * It appends an entry to the list. * It *must* be called with node_affinity.lock held. */ static void node_affinity_add_tail(struct hfi1_affinity_node *entry) { list_add_tail(&entry->list, &node_affinity.list); } /* It must be called with node_affinity.lock held */ static struct hfi1_affinity_node *node_affinity_lookup(int node) { struct hfi1_affinity_node *entry; list_for_each_entry(entry, &node_affinity.list, list) { if (entry->node == node) return entry; } return NULL; } static int per_cpu_affinity_get(cpumask_var_t possible_cpumask, u16 __percpu *comp_vect_affinity) { int curr_cpu; u16 cntr; u16 prev_cntr; int ret_cpu; if (!possible_cpumask) { ret_cpu = -EINVAL; goto fail; } if (!comp_vect_affinity) { ret_cpu = -EINVAL; goto fail; } ret_cpu = cpumask_first(possible_cpumask); if (ret_cpu >= nr_cpu_ids) { ret_cpu = -EINVAL; goto fail; } prev_cntr = *per_cpu_ptr(comp_vect_affinity, ret_cpu); for_each_cpu(curr_cpu, possible_cpumask) { cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu); if (cntr < prev_cntr) { ret_cpu = curr_cpu; prev_cntr = cntr; } } *per_cpu_ptr(comp_vect_affinity, ret_cpu) += 1; fail: return ret_cpu; } static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask, u16 __percpu *comp_vect_affinity) { int curr_cpu; int max_cpu; u16 cntr; u16 prev_cntr; if (!possible_cpumask) return -EINVAL; if (!comp_vect_affinity) return -EINVAL; max_cpu = cpumask_first(possible_cpumask); if (max_cpu >= nr_cpu_ids) return -EINVAL; prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu); for_each_cpu(curr_cpu, possible_cpumask) { cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu); if (cntr > prev_cntr) { max_cpu = curr_cpu; prev_cntr = cntr; } } *per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1; return max_cpu; } /* * Non-interrupt CPUs are used first, then interrupt CPUs. * Two already allocated cpu masks must be passed. 
*/ static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd, struct hfi1_affinity_node *entry, cpumask_var_t non_intr_cpus, cpumask_var_t available_cpus) __must_hold(&node_affinity.lock) { int cpu; struct cpu_mask_set *set = dd->comp_vect; lockdep_assert_held(&node_affinity.lock); if (!non_intr_cpus) { cpu = -1; goto fail; } if (!available_cpus) { cpu = -1; goto fail; } /* Available CPUs for pinning completion vectors */ _cpu_mask_set_gen_inc(set); cpumask_andnot(available_cpus, &set->mask, &set->used); /* Available CPUs without SDMA engine interrupts */ cpumask_andnot(non_intr_cpus, available_cpus, &entry->def_intr.used); /* If there are non-interrupt CPUs available, use them first */ if (!cpumask_empty(non_intr_cpus)) cpu = cpumask_first(non_intr_cpus); else /* Otherwise, use interrupt CPUs */ cpu = cpumask_first(available_cpus); if (cpu >= nr_cpu_ids) { /* empty */ cpu = -1; goto fail; } cpumask_set_cpu(cpu, &set->used); fail: return cpu; } static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu) { struct cpu_mask_set *set = dd->comp_vect; if (cpu < 0) return; cpu_mask_set_put(set, cpu); } /* _dev_comp_vect_mappings_destroy() is reentrant */ static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd) { int i, cpu; if (!dd->comp_vect_mappings) return; for (i = 0; i < dd->comp_vect_possible_cpus; i++) { cpu = dd->comp_vect_mappings[i]; _dev_comp_vect_cpu_put(dd, cpu); dd->comp_vect_mappings[i] = -1; hfi1_cdbg(AFFINITY, "[%s] Release CPU %d from completion vector %d", rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i); } kfree(dd->comp_vect_mappings); dd->comp_vect_mappings = NULL; } /* * This function creates the table for looking up CPUs for completion vectors. * num_comp_vectors needs to have been initilized before calling this function. 
*/ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd, struct hfi1_affinity_node *entry) __must_hold(&node_affinity.lock) { int i, cpu, ret; cpumask_var_t non_intr_cpus; cpumask_var_t available_cpus; lockdep_assert_held(&node_affinity.lock); if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL)) return -ENOMEM; if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) { free_cpumask_var(non_intr_cpus); return -ENOMEM; } dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus, sizeof(*dd->comp_vect_mappings), GFP_KERNEL); if (!dd->comp_vect_mappings) { ret = -ENOMEM; goto fail; } for (i = 0; i < dd->comp_vect_possible_cpus; i++) dd->comp_vect_mappings[i] = -1; for (i = 0; i < dd->comp_vect_possible_cpus; i++) { cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus, available_cpus); if (cpu < 0) { ret = -EINVAL; goto fail; } dd->comp_vect_mappings[i] = cpu; hfi1_cdbg(AFFINITY, "[%s] Completion Vector %d -> CPU %d", rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu); } free_cpumask_var(available_cpus); free_cpumask_var(non_intr_cpus); return 0; fail: free_cpumask_var(available_cpus); free_cpumask_var(non_intr_cpus); _dev_comp_vect_mappings_destroy(dd); return ret; } int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd) { int ret; struct hfi1_affinity_node *entry; mutex_lock(&node_affinity.lock); entry = node_affinity_lookup(dd->node); if (!entry) { ret = -EINVAL; goto unlock; } ret = _dev_comp_vect_mappings_create(dd, entry); unlock: mutex_unlock(&node_affinity.lock); return ret; } void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd) { _dev_comp_vect_mappings_destroy(dd); } int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect) { struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); struct hfi1_devdata *dd = dd_from_dev(verbs_dev); if (!dd->comp_vect_mappings) return -EINVAL; if (comp_vect >= dd->comp_vect_possible_cpus) return -EINVAL; return dd->comp_vect_mappings[comp_vect]; } /* * It assumes dd->comp_vect_possible_cpus is available. */ static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd, struct hfi1_affinity_node *entry, bool first_dev_init) __must_hold(&node_affinity.lock) { int i, j, curr_cpu; int possible_cpus_comp_vect = 0; struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask; lockdep_assert_held(&node_affinity.lock); /* * If there's only one CPU available for completion vectors, then * there will only be one completion vector available. Othewise, * the number of completion vector available will be the number of * available CPUs divide it by the number of devices in the * local NUMA node. */ if (cpumask_weight(&entry->comp_vect_mask) == 1) { possible_cpus_comp_vect = 1; dd_dev_warn(dd, "Number of kernel receive queues is too large for completion vector affinity to be effective\n"); } else { possible_cpus_comp_vect += cpumask_weight(&entry->comp_vect_mask) / hfi1_per_node_cntr[dd->node]; /* * If the completion vector CPUs available doesn't divide * evenly among devices, then the first device device to be * initialized gets an extra CPU. 
*/ if (first_dev_init && cpumask_weight(&entry->comp_vect_mask) % hfi1_per_node_cntr[dd->node] != 0) possible_cpus_comp_vect++; } dd->comp_vect_possible_cpus = possible_cpus_comp_vect; /* Reserving CPUs for device completion vector */ for (i = 0; i < dd->comp_vect_possible_cpus; i++) { curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask, entry->comp_vect_affinity); if (curr_cpu < 0) goto fail; cpumask_set_cpu(curr_cpu, dev_comp_vect_mask); } hfi1_cdbg(AFFINITY, "[%s] Completion vector affinity CPU set(s) %*pbl", rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpumask_pr_args(dev_comp_vect_mask)); return 0; fail: for (j = 0; j < i; j++) per_cpu_affinity_put_max(&entry->comp_vect_mask, entry->comp_vect_affinity); return curr_cpu; } /* * It assumes dd->comp_vect_possible_cpus is available. */ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd, struct hfi1_affinity_node *entry) __must_hold(&node_affinity.lock) { int i, cpu; lockdep_assert_held(&node_affinity.lock); if (!dd->comp_vect_possible_cpus) return; for (i = 0; i < dd->comp_vect_possible_cpus; i++) { cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask, entry->comp_vect_affinity); /* Clearing CPU in device completion vector cpu mask */ if (cpu >= 0) cpumask_clear_cpu(cpu, &dd->comp_vect->mask); } dd->comp_vect_possible_cpus = 0; } /* * Interrupt affinity. * * non-rcv avail gets a default mask that * starts as possible cpus with threads reset * and each rcv avail reset. * * rcv avail gets node relative 1 wrapping back * to the node relative 1 as necessary. * */ int hfi1_dev_affinity_init(struct hfi1_devdata *dd) { struct hfi1_affinity_node *entry; const struct cpumask *local_mask; int curr_cpu, possible, i, ret; bool new_entry = false; local_mask = cpumask_of_node(dd->node); if (cpumask_first(local_mask) >= nr_cpu_ids) local_mask = topology_core_cpumask(0); mutex_lock(&node_affinity.lock); entry = node_affinity_lookup(dd->node); /* * If this is the first time this NUMA node's affinity is used, * create an entry in the global affinity structure and initialize it. */ if (!entry) { entry = node_affinity_allocate(dd->node); if (!entry) { dd_dev_err(dd, "Unable to allocate global affinity node\n"); ret = -ENOMEM; goto fail; } new_entry = true; init_cpu_mask_set(&entry->def_intr); init_cpu_mask_set(&entry->rcv_intr); cpumask_clear(&entry->comp_vect_mask); cpumask_clear(&entry->general_intr_mask); /* Use the "real" cpu mask of this node as the default */ cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask, local_mask); /* fill in the receive list */ possible = cpumask_weight(&entry->def_intr.mask); curr_cpu = cpumask_first(&entry->def_intr.mask); if (possible == 1) { /* only one CPU, everyone will use it */ cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask); cpumask_set_cpu(curr_cpu, &entry->general_intr_mask); } else { /* * The general/control context will be the first CPU in * the default list, so it is removed from the default * list and added to the general interrupt list. */ cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask); cpumask_set_cpu(curr_cpu, &entry->general_intr_mask); curr_cpu = cpumask_next(curr_cpu, &entry->def_intr.mask); /* * Remove the remaining kernel receive queues from * the default list and add them to the receive list. 
*/ for (i = 0; i < (dd->n_krcv_queues - 1) * hfi1_per_node_cntr[dd->node]; i++) { cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask); cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask); curr_cpu = cpumask_next(curr_cpu, &entry->def_intr.mask); if (curr_cpu >= nr_cpu_ids) break; } /* * If there ends up being 0 CPU cores leftover for SDMA * engines, use the same CPU cores as general/control * context. */ if (cpumask_empty(&entry->def_intr.mask)) cpumask_copy(&entry->def_intr.mask, &entry->general_intr_mask); } /* Determine completion vector CPUs for the entire node */ cpumask_and(&entry->comp_vect_mask, &node_affinity.real_cpu_mask, local_mask); cpumask_andnot(&entry->comp_vect_mask, &entry->comp_vect_mask, &entry->rcv_intr.mask); cpumask_andnot(&entry->comp_vect_mask, &entry->comp_vect_mask, &entry->general_intr_mask); /* * If there ends up being 0 CPU cores leftover for completion * vectors, use the same CPU core as the general/control * context. */ if (cpumask_empty(&entry->comp_vect_mask)) cpumask_copy(&entry->comp_vect_mask, &entry->general_intr_mask); } ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry); if (ret < 0) goto fail; if (new_entry) node_affinity_add_tail(entry); dd->affinity_entry = entry; mutex_unlock(&node_affinity.lock); return 0; fail: if (new_entry) node_affinity_destroy(entry); mutex_unlock(&node_affinity.lock); return ret; } void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd) { struct hfi1_affinity_node *entry; mutex_lock(&node_affinity.lock); if (!dd->affinity_entry) goto unlock; entry = node_affinity_lookup(dd->node); if (!entry) goto unlock; /* * Free device completion vector CPUs to be used by future * completion vectors */ _dev_comp_vect_cpu_mask_clean_up(dd, entry); unlock: dd->affinity_entry = NULL; mutex_unlock(&node_affinity.lock); } /* * Function updates the irq affinity hint for msix after it has been changed * by the user using the /proc/irq interface. This function only accepts * one cpu in the mask. 
*/ static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu) { struct sdma_engine *sde = msix->arg; struct hfi1_devdata *dd = sde->dd; struct hfi1_affinity_node *entry; struct cpu_mask_set *set; int i, old_cpu; if (cpu > num_online_cpus() || cpu == sde->cpu) return; mutex_lock(&node_affinity.lock); entry = node_affinity_lookup(dd->node); if (!entry) goto unlock; old_cpu = sde->cpu; sde->cpu = cpu; cpumask_clear(&msix->mask); cpumask_set_cpu(cpu, &msix->mask); dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n", msix->irq, irq_type_names[msix->type], sde->this_idx, cpu); irq_set_affinity_hint(msix->irq, &msix->mask); /* * Set the new cpu in the hfi1_affinity_node and clean * the old cpu if it is not used by any other IRQ */ set = &entry->def_intr; cpumask_set_cpu(cpu, &set->mask); cpumask_set_cpu(cpu, &set->used); for (i = 0; i < dd->msix_info.max_requested; i++) { struct hfi1_msix_entry *other_msix; other_msix = &dd->msix_info.msix_entries[i]; if (other_msix->type != IRQ_SDMA || other_msix == msix) continue; if (cpumask_test_cpu(old_cpu, &other_msix->mask)) goto unlock; } cpumask_clear_cpu(old_cpu, &set->mask); cpumask_clear_cpu(old_cpu, &set->used); unlock: mutex_unlock(&node_affinity.lock); } static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) { int cpu = cpumask_first(mask); struct hfi1_msix_entry *msix = container_of(notify, struct hfi1_msix_entry, notify); /* Only one CPU configuration supported currently */ hfi1_update_sdma_affinity(msix, cpu); } static void hfi1_irq_notifier_release(struct kref *ref) { /* * This is required by affinity notifier. We don't have anything to * free here. */ } static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix) { struct irq_affinity_notify *notify = &msix->notify; notify->irq = msix->irq; notify->notify = hfi1_irq_notifier_notify; notify->release = hfi1_irq_notifier_release; if (irq_set_affinity_notifier(notify->irq, notify)) pr_err("Failed to register sdma irq affinity notifier for irq %d\n", notify->irq); } static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix) { struct irq_affinity_notify *notify = &msix->notify; if (irq_set_affinity_notifier(notify->irq, NULL)) pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n", notify->irq); } /* * Function sets the irq affinity for msix. * It *must* be called with node_affinity.lock held. */ static int get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix) { cpumask_var_t diff; struct hfi1_affinity_node *entry; struct cpu_mask_set *set = NULL; struct sdma_engine *sde = NULL; struct hfi1_ctxtdata *rcd = NULL; char extra[64]; int cpu = -1; extra[0] = '\0'; cpumask_clear(&msix->mask); entry = node_affinity_lookup(dd->node); switch (msix->type) { case IRQ_SDMA: sde = (struct sdma_engine *)msix->arg; scnprintf(extra, 64, "engine %u", sde->this_idx); set = &entry->def_intr; break; case IRQ_GENERAL: cpu = cpumask_first(&entry->general_intr_mask); break; case IRQ_RCVCTXT: rcd = (struct hfi1_ctxtdata *)msix->arg; if (rcd->ctxt == HFI1_CTRL_CTXT) cpu = cpumask_first(&entry->general_intr_mask); else set = &entry->rcv_intr; scnprintf(extra, 64, "ctxt %u", rcd->ctxt); break; case IRQ_NETDEVCTXT: rcd = (struct hfi1_ctxtdata *)msix->arg; set = &entry->def_intr; scnprintf(extra, 64, "ctxt %u", rcd->ctxt); break; default: dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type); return -EINVAL; } /* * The general and control contexts are placed on a particular * CPU, which is set above. 
Skip accounting for it. Everything else * finds its CPU here. */ if (cpu == -1 && set) { if (!zalloc_cpumask_var(&diff, GFP_KERNEL)) return -ENOMEM; cpu = cpu_mask_set_get_first(set, diff); if (cpu < 0) { free_cpumask_var(diff); dd_dev_err(dd, "Failure to obtain CPU for IRQ\n"); return cpu; } free_cpumask_var(diff); } cpumask_set_cpu(cpu, &msix->mask); dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n", msix->irq, irq_type_names[msix->type], extra, cpu); irq_set_affinity_hint(msix->irq, &msix->mask); if (msix->type == IRQ_SDMA) { sde->cpu = cpu; hfi1_setup_sdma_notifier(msix); } return 0; } int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix) { int ret; mutex_lock(&node_affinity.lock); ret = get_irq_affinity(dd, msix); mutex_unlock(&node_affinity.lock); return ret; } void hfi1_put_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix) { struct cpu_mask_set *set = NULL; struct hfi1_affinity_node *entry; mutex_lock(&node_affinity.lock); entry = node_affinity_lookup(dd->node); switch (msix->type) { case IRQ_SDMA: set = &entry->def_intr; hfi1_cleanup_sdma_notifier(msix); break; case IRQ_GENERAL: /* Don't do accounting for general contexts */ break; case IRQ_RCVCTXT: { struct hfi1_ctxtdata *rcd = msix->arg; /* Don't do accounting for control contexts */ if (rcd->ctxt != HFI1_CTRL_CTXT) set = &entry->rcv_intr; break; } case IRQ_NETDEVCTXT: set = &entry->def_intr; break; default: mutex_unlock(&node_affinity.lock); return; } if (set) { cpumask_andnot(&set->used, &set->used, &msix->mask); _cpu_mask_set_gen_dec(set); } irq_set_affinity_hint(msix->irq, NULL); cpumask_clear(&msix->mask); mutex_unlock(&node_affinity.lock); } /* This should be called with node_affinity.lock held */ static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask, struct hfi1_affinity_node_list *affinity) { int possible, curr_cpu, i; uint num_cores_per_socket = node_affinity.num_online_cpus / affinity->num_core_siblings / node_affinity.num_online_nodes; cpumask_copy(hw_thread_mask, &affinity->proc.mask); if (affinity->num_core_siblings > 0) { /* Removing other siblings not needed for now */ possible = cpumask_weight(hw_thread_mask); curr_cpu = cpumask_first(hw_thread_mask); for (i = 0; i < num_cores_per_socket * node_affinity.num_online_nodes; i++) curr_cpu = cpumask_next(curr_cpu, hw_thread_mask); for (; i < possible; i++) { cpumask_clear_cpu(curr_cpu, hw_thread_mask); curr_cpu = cpumask_next(curr_cpu, hw_thread_mask); } /* Identifying correct HW threads within physical cores */ cpumask_shift_left(hw_thread_mask, hw_thread_mask, num_cores_per_socket * node_affinity.num_online_nodes * hw_thread_no); } } int hfi1_get_proc_affinity(int node) { int cpu = -1, ret, i; struct hfi1_affinity_node *entry; cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask; const struct cpumask *node_mask, *proc_mask = current->cpus_ptr; struct hfi1_affinity_node_list *affinity = &node_affinity; struct cpu_mask_set *set = &affinity->proc; /* * check whether process/context affinity has already * been set */ if (current->nr_cpus_allowed == 1) { hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", current->pid, current->comm, cpumask_pr_args(proc_mask)); /* * Mark the pre-set CPU as used. 
This is atomic so we don't * need the lock */ cpu = cpumask_first(proc_mask); cpumask_set_cpu(cpu, &set->used); goto done; } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) { hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", current->pid, current->comm, cpumask_pr_args(proc_mask)); goto done; } /* * The process does not have a preset CPU affinity so find one to * recommend using the following algorithm: * * For each user process that is opening a context on HFI Y: * a) If all cores are filled, reinitialize the bitmask * b) Fill real cores first, then HT cores (First set of HT * cores on all physical cores, then second set of HT core, * and, so on) in the following order: * * 1. Same NUMA node as HFI Y and not running an IRQ * handler * 2. Same NUMA node as HFI Y and running an IRQ handler * 3. Different NUMA node to HFI Y and not running an IRQ * handler * 4. Different NUMA node to HFI Y and running an IRQ * handler * c) Mark core as filled in the bitmask. As user processes are * done, clear cores from the bitmask. */ ret = zalloc_cpumask_var(&diff, GFP_KERNEL); if (!ret) goto done; ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL); if (!ret) goto free_diff; ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL); if (!ret) goto free_hw_thread_mask; ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL); if (!ret) goto free_available_mask; mutex_lock(&affinity->lock); /* * If we've used all available HW threads, clear the mask and start * overloading. */ _cpu_mask_set_gen_inc(set); /* * If NUMA node has CPUs used by interrupt handlers, include them in the * interrupt handler mask. */ entry = node_affinity_lookup(node); if (entry) { cpumask_copy(intrs_mask, (entry->def_intr.gen ? &entry->def_intr.mask : &entry->def_intr.used)); cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ? &entry->rcv_intr.mask : &entry->rcv_intr.used)); cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask); } hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl", cpumask_pr_args(intrs_mask)); cpumask_copy(hw_thread_mask, &set->mask); /* * If HT cores are enabled, identify which HW threads within the * physical cores should be used. */ if (affinity->num_core_siblings > 0) { for (i = 0; i < affinity->num_core_siblings; i++) { find_hw_thread_mask(i, hw_thread_mask, affinity); /* * If there's at least one available core for this HW * thread number, stop looking for a core. * * diff will always be not empty at least once in this * loop as the used mask gets reset when * (set->mask == set->used) before this loop. */ cpumask_andnot(diff, hw_thread_mask, &set->used); if (!cpumask_empty(diff)) break; } } hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl", cpumask_pr_args(hw_thread_mask)); node_mask = cpumask_of_node(node); hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node, cpumask_pr_args(node_mask)); /* Get cpumask of available CPUs on preferred NUMA */ cpumask_and(available_mask, hw_thread_mask, node_mask); cpumask_andnot(available_mask, available_mask, &set->used); hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node, cpumask_pr_args(available_mask)); /* * At first, we don't want to place processes on the same * CPUs as interrupt handlers. Then, CPUs running interrupt * handlers are used. * * 1) If diff is not empty, then there are CPUs not running * non-interrupt handlers available, so diff gets copied * over to available_mask. 
* 2) If diff is empty, then all CPUs not running interrupt * handlers are taken, so available_mask contains all * available CPUs running interrupt handlers. * 3) If available_mask is empty, then all CPUs on the * preferred NUMA node are taken, so other NUMA nodes are * used for process assignments using the same method as * the preferred NUMA node. */ cpumask_andnot(diff, available_mask, intrs_mask); if (!cpumask_empty(diff)) cpumask_copy(available_mask, diff); /* If we don't have CPUs on the preferred node, use other NUMA nodes */ if (cpumask_empty(available_mask)) { cpumask_andnot(available_mask, hw_thread_mask, &set->used); /* Excluding preferred NUMA cores */ cpumask_andnot(available_mask, available_mask, node_mask); hfi1_cdbg(PROC, "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl", cpumask_pr_args(available_mask)); /* * At first, we don't want to place processes on the same * CPUs as interrupt handlers. */ cpumask_andnot(diff, available_mask, intrs_mask); if (!cpumask_empty(diff)) cpumask_copy(available_mask, diff); } hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl", cpumask_pr_args(available_mask)); cpu = cpumask_first(available_mask); if (cpu >= nr_cpu_ids) /* empty */ cpu = -1; else cpumask_set_cpu(cpu, &set->used); mutex_unlock(&affinity->lock); hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu); free_cpumask_var(intrs_mask); free_available_mask: free_cpumask_var(available_mask); free_hw_thread_mask: free_cpumask_var(hw_thread_mask); free_diff: free_cpumask_var(diff); done: return cpu; } void hfi1_put_proc_affinity(int cpu) { struct hfi1_affinity_node_list *affinity = &node_affinity; struct cpu_mask_set *set = &affinity->proc; if (cpu < 0) return; mutex_lock(&affinity->lock); cpu_mask_set_put(set, cpu); hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu); mutex_unlock(&affinity->lock); }
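/*
 * Editor's note: the block below is a userspace sketch added by the editor,
 * not part of affinity.c. It models the cpu_mask_set bookkeeping above: a
 * "mask" of candidate CPUs and a "used" mask of CPUs already handed out; once
 * every candidate has been used, the generation counter is bumped and the
 * used mask is cleared so allocation starts overloading CPUs from the
 * beginning. A 64-bit word stands in for the kernel's cpumask here.
 */
#include <stdio.h>
#include <stdint.h>

struct cpu_mask_set {
	uint64_t mask;	/* candidate CPUs */
	uint64_t used;	/* CPUs already assigned this generation */
	unsigned int gen;
};

/* Mirror of _cpu_mask_set_gen_inc(): start a new generation when full. */
static void gen_inc(struct cpu_mask_set *set)
{
	if (set->used == set->mask) {
		set->gen++;
		set->used = 0;
	}
}

/* Mirror of cpu_mask_set_get_first(): hand out the first unused candidate. */
static int get_first(struct cpu_mask_set *set)
{
	uint64_t diff;
	int cpu;

	gen_inc(set);
	diff = set->mask & ~set->used;
	if (!diff)
		return -1;	/* no candidate CPUs at all */
	cpu = __builtin_ctzll(diff);	/* GCC/Clang builtin */
	set->used |= 1ull << cpu;
	return cpu;
}

int main(void)
{
	struct cpu_mask_set set = { .mask = 0xf, .used = 0, .gen = 0 };
	int i;

	/* Six requests against four CPUs: wraps to generation 1 after four. */
	for (i = 0; i < 6; i++)
		printf("request %d -> cpu %d (gen %u)\n",
		       i, get_first(&set), set.gen);
	return 0;
}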
linux-master
drivers/infiniband/hw/hfi1/affinity.c
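/*
 * Editor's note: illustrative sketch, not the kernel implementation.
 * per_cpu_affinity_get() in affinity.c picks, from a candidate cpumask, the
 * CPU whose per-CPU completion-vector counter is lowest and then increments
 * that counter; per_cpu_affinity_put_max() later releases the most loaded
 * one. A plain array of counters stands in for the per-CPU variable here,
 * and all names are hypothetical.
 */
#include <stdio.h>

#define NCPUS 4

static unsigned int comp_vect_cnt[NCPUS];

/* Pick the least-loaded CPU among those set in candidate_mask. */
static int least_loaded_get(unsigned int candidate_mask)
{
	int cpu, best = -1;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (!(candidate_mask & (1u << cpu)))
			continue;
		if (best < 0 || comp_vect_cnt[cpu] < comp_vect_cnt[best])
			best = cpu;
	}
	if (best >= 0)
		comp_vect_cnt[best]++;	/* account for the new assignment */
	return best;
}

int main(void)
{
	int i;

	/* CPUs 1-3 are candidates; assignments spread evenly across them. */
	for (i = 0; i < 5; i++)
		printf("vector %d -> cpu %d\n", i, least_loaded_get(0xe));
	return 0;
}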
/* QLogic qedr NIC Driver * Copyright (c) 2015-2016 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/dma-mapping.h> #include <linux/crc32.h> #include <linux/iommu.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/udp.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/iw_cm.h> #include <rdma/ib_umem.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> #include <linux/qed/qed_if.h> #include <linux/qed/qed_rdma_if.h> #include "qedr.h" #include "verbs.h" #include <rdma/qedr-abi.h> #include "qedr_roce_cm.h" void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info) { info->gsi_cons = (info->gsi_cons + 1) % info->max_wr; } void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp, struct ib_qp_init_attr *attrs) { dev->gsi_qp_created = 1; dev->gsi_sqcq = get_qedr_cq(attrs->send_cq); dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq); dev->gsi_qp = qp; } static void qedr_ll2_complete_tx_packet(void *cxt, u8 connection_handle, void *cookie, dma_addr_t first_frag_addr, bool b_last_fragment, bool b_last_packet) { struct qedr_dev *dev = (struct qedr_dev *)cxt; struct qed_roce_ll2_packet *pkt = cookie; struct qedr_cq *cq = dev->gsi_sqcq; struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; DP_DEBUG(dev, QEDR_MSG_GSI, "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n", dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons, cq->ibcq.comp_handler ? "Yes" : "No"); dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr, pkt->header.baddr); kfree(pkt); spin_lock_irqsave(&qp->q_lock, flags); qedr_inc_sw_gsi_cons(&qp->sq); spin_unlock_irqrestore(&qp->q_lock, flags); if (cq->ibcq.comp_handler) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); } static void qedr_ll2_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) { struct qedr_dev *dev = (struct qedr_dev *)cxt; struct qedr_cq *cq = dev->gsi_rqcq; struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; spin_lock_irqsave(&qp->q_lock, flags); qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? -EINVAL : 0; qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan; /* note: length stands for data length i.e. 
GRH is excluded */ qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = data->length.data_length; *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) = ntohl(data->opaque_data_0); *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) = ntohs((u16)data->opaque_data_1); qedr_inc_sw_gsi_cons(&qp->rq); spin_unlock_irqrestore(&qp->q_lock, flags); if (cq->ibcq.comp_handler) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); } static void qedr_ll2_release_rx_packet(void *cxt, u8 connection_handle, void *cookie, dma_addr_t rx_buf_addr, bool b_last_packet) { /* Do nothing... */ } static void qedr_destroy_gsi_cq(struct qedr_dev *dev, struct ib_qp_init_attr *attrs) { struct qed_rdma_destroy_cq_in_params iparams; struct qed_rdma_destroy_cq_out_params oparams; struct qedr_cq *cq; cq = get_qedr_cq(attrs->send_cq); iparams.icid = cq->icid; dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams); dev->ops->common->chain_free(dev->cdev, &cq->pbl); cq = get_qedr_cq(attrs->recv_cq); /* if a dedicated recv_cq was used, delete it too */ if (iparams.icid != cq->icid) { iparams.icid = cq->icid; dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams); dev->ops->common->chain_free(dev->cdev, &cq->pbl); } } static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev, struct ib_qp_init_attr *attrs) { if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) { DP_ERR(dev, " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n", attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE); return -EINVAL; } if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) { DP_ERR(dev, " create gsi qp: failed. max_recv_wr is too large %d>%d\n", attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR); return -EINVAL; } if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) { DP_ERR(dev, " create gsi qp: failed. max_send_wr is too large %d>%d\n", attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR); return -EINVAL; } return 0; } static int qedr_ll2_post_tx(struct qedr_dev *dev, struct qed_roce_ll2_packet *pkt) { enum qed_ll2_roce_flavor_type roce_flavor; struct qed_ll2_tx_pkt_info ll2_tx_pkt; int rc; int i; memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt)); roce_flavor = (pkt->roce_mode == ROCE_V1) ? 
QED_LL2_ROCE : QED_LL2_RROCE; if (pkt->roce_mode == ROCE_V2_IPV4) ll2_tx_pkt.enable_ip_cksum = 1; ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg; ll2_tx_pkt.vlan = 0; ll2_tx_pkt.tx_dest = pkt->tx_dest; ll2_tx_pkt.qed_roce_flavor = roce_flavor; ll2_tx_pkt.first_frag = pkt->header.baddr; ll2_tx_pkt.first_frag_len = pkt->header.len; ll2_tx_pkt.cookie = pkt; /* tx header */ rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx, dev->gsi_ll2_handle, &ll2_tx_pkt, 1); if (rc) { /* TX failed while posting header - release resources */ dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr, pkt->header.baddr); kfree(pkt); DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc); return rc; } /* tx payload */ for (i = 0; i < pkt->n_seg; i++) { rc = dev->ops->ll2_set_fragment_of_tx_packet( dev->rdma_ctx, dev->gsi_ll2_handle, pkt->payload[i].baddr, pkt->payload[i].len); if (rc) { /* if failed not much to do here, partial packet has * been posted we can't free memory, will need to wait * for completion */ DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc); return rc; } } return 0; } static int qedr_ll2_stop(struct qedr_dev *dev) { int rc; if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE) return 0; /* remove LL2 MAC address filter */ rc = dev->ops->ll2_set_mac_filter(dev->cdev, dev->gsi_ll2_mac_address, NULL); rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle); if (rc) DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc); dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle); dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE; return rc; } static int qedr_ll2_start(struct qedr_dev *dev, struct ib_qp_init_attr *attrs, struct qedr_qp *qp) { struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; int rc; /* configure and start LL2 */ cbs.rx_comp_cb = qedr_ll2_complete_rx_packet; cbs.tx_comp_cb = qedr_ll2_complete_tx_packet; cbs.rx_release_cb = qedr_ll2_release_rx_packet; cbs.tx_release_cb = qedr_ll2_complete_tx_packet; cbs.cookie = dev; memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_ROCE; data.input.mtu = dev->ndev->mtu; data.input.rx_num_desc = attrs->cap.max_recv_wr; data.input.rx_drop_ttl0_flg = true; data.input.rx_vlan_removal_en = false; data.input.tx_num_desc = attrs->cap.max_send_wr; data.input.tx_tc = 0; data.input.tx_dest = QED_LL2_TX_DEST_NW; data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET; data.input.ai_err_no_buf = QED_LL2_DROP_PACKET; data.input.gsi_enable = 1; data.p_connection_handle = &dev->gsi_ll2_handle; data.cbs = &cbs; rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data); if (rc) { DP_ERR(dev, "ll2 start: failed to acquire LL2 connection (rc=%d)\n", rc); return rc; } rc = dev->ops->ll2_establish_connection(dev->rdma_ctx, dev->gsi_ll2_handle); if (rc) { DP_ERR(dev, "ll2 start: failed to establish LL2 connection (rc=%d)\n", rc); goto err1; } rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr); if (rc) goto err2; return 0; err2: dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle); err1: dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle); return rc; } int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs, struct qedr_qp *qp) { int rc; rc = qedr_check_gsi_qp_attrs(dev, attrs); if (rc) return rc; rc = qedr_ll2_start(dev, attrs, qp); if (rc) { DP_ERR(dev, "create gsi qp: failed on ll2 start. 
rc=%d\n", rc); return rc; } /* create QP */ qp->ibqp.qp_num = 1; qp->rq.max_wr = attrs->cap.max_recv_wr; qp->sq.max_wr = attrs->cap.max_send_wr; qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id), GFP_KERNEL); if (!qp->rqe_wr_id) goto err; qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id), GFP_KERNEL); if (!qp->wqe_wr_id) goto err; qedr_store_gsi_qp_cq(dev, qp, attrs); ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); /* the GSI CQ is handled by the driver so remove it from the FW */ qedr_destroy_gsi_cq(dev, attrs); dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI; DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp); return 0; err: kfree(qp->rqe_wr_id); rc = qedr_ll2_stop(dev); if (rc) DP_ERR(dev, "create gsi qp: failed destroy on create\n"); return -ENOMEM; } int qedr_destroy_gsi_qp(struct qedr_dev *dev) { return qedr_ll2_stop(dev); } #define QEDR_MAX_UD_HEADER_SIZE (100) #define QEDR_GSI_QPN (1) static inline int qedr_gsi_build_header(struct qedr_dev *dev, struct qedr_qp *qp, const struct ib_send_wr *swr, struct ib_ud_header *udh, int *roce_mode) { bool has_vlan = false, has_grh_ipv6 = true; struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr; const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); const struct ib_gid_attr *sgid_attr = grh->sgid_attr; int send_size = 0; u16 vlan_id = 0; u16 ether_type; int rc; int ip_ver = 0; bool has_udp = false; int i; rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL); if (rc) return rc; if (vlan_id < VLAN_CFI_MASK) has_vlan = true; send_size = 0; for (i = 0; i < swr->num_sge; ++i) send_size += swr->sg_list[i].length; has_udp = (sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); if (!has_udp) { /* RoCE v1 */ ether_type = ETH_P_IBOE; *roce_mode = ROCE_V1; } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) { /* RoCE v2 IPv4 */ ip_ver = 4; ether_type = ETH_P_IP; has_grh_ipv6 = false; *roce_mode = ROCE_V2_IPV4; } else { /* RoCE v2 IPv6 */ ip_ver = 6; ether_type = ETH_P_IPV6; *roce_mode = ROCE_V2_IPV6; } rc = ib_ud_header_init(send_size, false, true, has_vlan, has_grh_ipv6, ip_ver, has_udp, 0, udh); if (rc) { DP_ERR(dev, "gsi post send: failed to init header\n"); return rc; } /* ENET + VLAN headers */ ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac); ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr); if (has_vlan) { udh->eth.type = htons(ETH_P_8021Q); udh->vlan.tag = htons(vlan_id); udh->vlan.type = htons(ether_type); } else { udh->eth.type = htons(ether_type); } /* BTH */ udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED); udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT; udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn); udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1)); udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY; /* DETH */ udh->deth.qkey = htonl(0x80010000); udh->deth.source_qpn = htonl(QEDR_GSI_QPN); if (has_grh_ipv6) { /* GRH / IPv6 header */ udh->grh.traffic_class = grh->traffic_class; udh->grh.flow_label = grh->flow_label; udh->grh.hop_limit = grh->hop_limit; udh->grh.destination_gid = grh->dgid; memcpy(&udh->grh.source_gid.raw, sgid_attr->gid.raw, sizeof(udh->grh.source_gid.raw)); } else { /* IPv4 header */ u32 ipv4_addr; udh->ip4.protocol = IPPROTO_UDP; udh->ip4.tos = htonl(grh->flow_label); udh->ip4.frag_off = htons(IP_DF); udh->ip4.ttl = grh->hop_limit; ipv4_addr = qedr_get_ipv4_from_gid(sgid_attr->gid.raw); udh->ip4.saddr = ipv4_addr; ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw); udh->ip4.daddr = ipv4_addr; /* note: checksum is calculated 
by the device */ } /* UDP */ if (has_udp) { udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT); udh->udp.dport = htons(ROCE_V2_UDP_DPORT); udh->udp.csum = 0; /* UDP length is untouched hence is zero */ } return 0; } static inline int qedr_gsi_build_packet(struct qedr_dev *dev, struct qedr_qp *qp, const struct ib_send_wr *swr, struct qed_roce_ll2_packet **p_packet) { u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE]; struct qed_roce_ll2_packet *packet; struct pci_dev *pdev = dev->pdev; int roce_mode, header_size; struct ib_ud_header udh; int i, rc; *p_packet = NULL; rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode); if (rc) return rc; header_size = ib_ud_header_pack(&udh, &ud_header_buffer); packet = kzalloc(sizeof(*packet), GFP_ATOMIC); if (!packet) return -ENOMEM; packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size, &packet->header.baddr, GFP_ATOMIC); if (!packet->header.vaddr) { kfree(packet); return -ENOMEM; } if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) packet->tx_dest = QED_LL2_TX_DEST_LB; else packet->tx_dest = QED_LL2_TX_DEST_NW; packet->roce_mode = roce_mode; memcpy(packet->header.vaddr, ud_header_buffer, header_size); packet->header.len = header_size; packet->n_seg = swr->num_sge; for (i = 0; i < packet->n_seg; i++) { packet->payload[i].baddr = swr->sg_list[i].addr; packet->payload[i].len = swr->sg_list[i].length; } *p_packet = packet; return 0; } int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { struct qed_roce_ll2_packet *pkt = NULL; struct qedr_qp *qp = get_qedr_qp(ibqp); struct qedr_dev *dev = qp->dev; unsigned long flags; int rc; if (qp->state != QED_ROCE_QP_STATE_RTS) { *bad_wr = wr; DP_ERR(dev, "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n", qp->state); return -EINVAL; } if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) { DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n", wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE); rc = -EINVAL; goto err; } if (wr->opcode != IB_WR_SEND) { DP_ERR(dev, "gsi post send: failed due to unsupported opcode %d\n", wr->opcode); rc = -EINVAL; goto err; } spin_lock_irqsave(&qp->q_lock, flags); rc = qedr_gsi_build_packet(dev, qp, wr, &pkt); if (rc) { spin_unlock_irqrestore(&qp->q_lock, flags); goto err; } rc = qedr_ll2_post_tx(dev, pkt); if (!rc) { qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; qedr_inc_sw_prod(&qp->sq); DP_DEBUG(qp->dev, QEDR_MSG_GSI, "gsi post send: opcode=%d, wr_id=%llx\n", wr->opcode, wr->wr_id); } else { DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc); rc = -EAGAIN; *bad_wr = wr; } spin_unlock_irqrestore(&qp->q_lock, flags); if (wr->next) { DP_ERR(dev, "gsi post send: failed second WR. Only one WR may be passed at a time\n"); *bad_wr = wr->next; rc = -EINVAL; } return rc; err: *bad_wr = wr; return rc; } int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct qedr_dev *dev = get_qedr_dev(ibqp->device); struct qedr_qp *qp = get_qedr_qp(ibqp); unsigned long flags; int rc = 0; if ((qp->state != QED_ROCE_QP_STATE_RTR) && (qp->state != QED_ROCE_QP_STATE_RTS)) { *bad_wr = wr; DP_ERR(dev, "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n", qp->state); return -EINVAL; } spin_lock_irqsave(&qp->q_lock, flags); while (wr) { if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) { DP_ERR(dev, "gsi post recv: failed to post rx buffer. 
too many sges %d>%d\n", wr->num_sge, QEDR_GSI_MAX_RECV_SGE); goto err; } rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx, dev->gsi_ll2_handle, wr->sg_list[0].addr, wr->sg_list[0].length, NULL /* cookie */, 1 /* notify_fw */); if (rc) { DP_ERR(dev, "gsi post recv: failed to post rx buffer (rc=%d)\n", rc); goto err; } memset(&qp->rqe_wr_id[qp->rq.prod], 0, sizeof(qp->rqe_wr_id[qp->rq.prod])); qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0]; qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id; qedr_inc_sw_prod(&qp->rq); wr = wr->next; } spin_unlock_irqrestore(&qp->q_lock, flags); return rc; err: spin_unlock_irqrestore(&qp->q_lock, flags); *bad_wr = wr; return -ENOMEM; } int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct qedr_dev *dev = get_qedr_dev(ibcq->device); struct qedr_cq *cq = get_qedr_cq(ibcq); struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; u16 vlan_id; int i = 0; spin_lock_irqsave(&cq->cq_lock, flags); while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) { memset(&wc[i], 0, sizeof(*wc)); wc[i].qp = &qp->ibqp; wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id; wc[i].opcode = IB_WC_RECV; wc[i].pkey_index = 0; wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ? IB_WC_GENERAL_ERR : IB_WC_SUCCESS; /* 0 - currently only one recv sg is supported */ wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length; wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); wc[i].wc_flags |= IB_WC_WITH_SMAC; vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan & VLAN_VID_MASK; if (vlan_id) { wc[i].wc_flags |= IB_WC_WITH_VLAN; wc[i].vlan_id = vlan_id; wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } qedr_inc_sw_cons(&qp->rq); i++; } while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) { memset(&wc[i], 0, sizeof(*wc)); wc[i].qp = &qp->ibqp; wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id; wc[i].opcode = IB_WC_SEND; wc[i].status = IB_WC_SUCCESS; qedr_inc_sw_cons(&qp->sq); i++; } spin_unlock_irqrestore(&cq->cq_lock, flags); DP_DEBUG(dev, QEDR_MSG_GSI, "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n", num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons, qp->sq.gsi_cons, qp->ibqp.qp_num); return i; }
linux-master
drivers/infiniband/hw/qedr/qedr_roce_cm.c
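qedr_gsi_build_header() in the file above selects the GSI wire format from the source GID attribute: a GID without UDP encapsulation yields RoCE v1 with the InfiniBand-over-Ethernet ethertype, a UDP-encapsulated GID holding an IPv4-mapped address yields RoCE v2 over IPv4, and anything else yields RoCE v2 over IPv6. The standalone sketch below reproduces only that classification; classify_gid(), gid_is_v4_mapped() and the roce_flavor enum are hypothetical names, and the udp_encap flag stands in for the driver's IB_GID_TYPE_ROCE_UDP_ENCAP test.

/*
 * Standalone sketch of the RoCE flavor selection done in
 * qedr_gsi_build_header(); only the classification rule is taken
 * from the driver, the names below are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum roce_flavor { ROCEV1, ROCEV2_IPV4, ROCEV2_IPV6 };

/* ::ffff:a.b.c.d - ten zero bytes followed by 0xff 0xff */
static bool gid_is_v4_mapped(const uint8_t gid[16])
{
    static const uint8_t prefix[12] = {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
    };

    return memcmp(gid, prefix, sizeof(prefix)) == 0;
}

/*
 * Without UDP encapsulation the packet is RoCE v1 (ethertype 0x8915);
 * otherwise the GID decides between v2/IPv4 (0x0800) and v2/IPv6 (0x86dd).
 */
static enum roce_flavor classify_gid(const uint8_t gid[16], bool udp_encap,
                                     uint16_t *ether_type)
{
    if (!udp_encap) {
        *ether_type = 0x8915;  /* ETH_P_IBOE */
        return ROCEV1;
    }
    if (gid_is_v4_mapped(gid)) {
        *ether_type = 0x0800;  /* ETH_P_IP */
        return ROCEV2_IPV4;
    }
    *ether_type = 0x86dd;      /* ETH_P_IPV6 */
    return ROCEV2_IPV6;
}

int main(void)
{
    uint8_t v4_mapped[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
                              0, 0, 0xff, 0xff, 192, 0, 2, 1 };
    uint16_t etype;
    enum roce_flavor f = classify_gid(v4_mapped, true, &etype);

    printf("flavor=%d ethertype=0x%04x\n", f, etype);  /* 1, 0x0800 */
    return 0;
}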
/* QLogic qedr NIC Driver * Copyright (c) 2015-2016 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <rdma/ib_verbs.h> #include <rdma/ib_addr.h> #include <rdma/ib_user_verbs.h> #include <rdma/iw_cm.h> #include <rdma/ib_mad.h> #include <linux/netdevice.h> #include <linux/iommu.h> #include <linux/pci.h> #include <net/addrconf.h> #include <linux/qed/qed_chain.h> #include <linux/qed/qed_if.h> #include "qedr.h" #include "verbs.h" #include <rdma/qedr-abi.h> #include "qedr_iw_cm.h" MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver"); MODULE_AUTHOR("QLogic Corporation"); MODULE_LICENSE("Dual BSD/GPL"); #define QEDR_WQ_MULTIPLIER_DFT (3) static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num, enum ib_event_type type) { struct ib_event ibev; ibev.device = &dev->ibdev; ibev.element.port_num = port_num; ibev.event = type; ib_dispatch_event(&ibev); } static enum rdma_link_layer qedr_link_layer(struct ib_device *device, u32 port_num) { return IB_LINK_LAYER_ETHERNET; } static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str) { struct qedr_dev *qedr = get_qedr_dev(ibdev); u32 fw_ver = (u32)qedr->attr.fw_ver; snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d", (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF, (fw_ver >> 8) & 0xFF, fw_ver & 0xFF); } static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; int err; err = qedr_query_port(ibdev, port_num, &attr); if (err) return err; immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; immutable->max_mad_size = IB_MGMT_MAD_SIZE; return 0; } static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; int err; err = qedr_query_port(ibdev, port_num, &attr); if (err) return err; immutable->gid_tbl_len = 1; immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; immutable->max_mad_size = 0; return 0; } /* QEDR sysfs interface */ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, char *buf) { struct qedr_dev *dev = rdma_device_to_drv_device(device, struct qedr_dev, ibdev); return 
sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver); } static DEVICE_ATTR_RO(hw_rev); static ssize_t hca_type_show(struct device *device, struct device_attribute *attr, char *buf) { struct qedr_dev *dev = rdma_device_to_drv_device(device, struct qedr_dev, ibdev); return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device, rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" : "RoCE"); } static DEVICE_ATTR_RO(hca_type); static struct attribute *qedr_attributes[] = { &dev_attr_hw_rev.attr, &dev_attr_hca_type.attr, NULL }; static const struct attribute_group qedr_attr_group = { .attrs = qedr_attributes, }; static const struct ib_device_ops qedr_iw_dev_ops = { .get_port_immutable = qedr_iw_port_immutable, .iw_accept = qedr_iw_accept, .iw_add_ref = qedr_iw_qp_add_ref, .iw_connect = qedr_iw_connect, .iw_create_listen = qedr_iw_create_listen, .iw_destroy_listen = qedr_iw_destroy_listen, .iw_get_qp = qedr_iw_get_qp, .iw_reject = qedr_iw_reject, .iw_rem_ref = qedr_iw_qp_rem_ref, .query_gid = qedr_iw_query_gid, }; static int qedr_iw_register_device(struct qedr_dev *dev) { dev->ibdev.node_type = RDMA_NODE_RNIC; ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops); memcpy(dev->ibdev.iw_ifname, dev->ndev->name, sizeof(dev->ibdev.iw_ifname)); return 0; } static const struct ib_device_ops qedr_roce_dev_ops = { .alloc_xrcd = qedr_alloc_xrcd, .dealloc_xrcd = qedr_dealloc_xrcd, .get_port_immutable = qedr_roce_port_immutable, .query_pkey = qedr_query_pkey, }; static void qedr_roce_register_device(struct qedr_dev *dev) { dev->ibdev.node_type = RDMA_NODE_IB_CA; ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops); } static const struct ib_device_ops qedr_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_QEDR, .uverbs_abi_ver = QEDR_ABI_VERSION, .alloc_mr = qedr_alloc_mr, .alloc_pd = qedr_alloc_pd, .alloc_ucontext = qedr_alloc_ucontext, .create_ah = qedr_create_ah, .create_cq = qedr_create_cq, .create_qp = qedr_create_qp, .create_srq = qedr_create_srq, .dealloc_pd = qedr_dealloc_pd, .dealloc_ucontext = qedr_dealloc_ucontext, .dereg_mr = qedr_dereg_mr, .destroy_ah = qedr_destroy_ah, .destroy_cq = qedr_destroy_cq, .destroy_qp = qedr_destroy_qp, .destroy_srq = qedr_destroy_srq, .device_group = &qedr_attr_group, .get_dev_fw_str = qedr_get_dev_fw_str, .get_dma_mr = qedr_get_dma_mr, .get_link_layer = qedr_link_layer, .map_mr_sg = qedr_map_mr_sg, .mmap = qedr_mmap, .mmap_free = qedr_mmap_free, .modify_qp = qedr_modify_qp, .modify_srq = qedr_modify_srq, .poll_cq = qedr_poll_cq, .post_recv = qedr_post_recv, .post_send = qedr_post_send, .post_srq_recv = qedr_post_srq_recv, .process_mad = qedr_process_mad, .query_device = qedr_query_device, .query_port = qedr_query_port, .query_qp = qedr_query_qp, .query_srq = qedr_query_srq, .reg_user_mr = qedr_reg_user_mr, .req_notify_cq = qedr_arm_cq, INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp), INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd), INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext), }; static int qedr_register_device(struct qedr_dev *dev) { int rc; dev->ibdev.node_guid = dev->attr.node_guid; memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC)); if (IS_IWARP(dev)) { rc = qedr_iw_register_device(dev); if (rc) return rc; } else { qedr_roce_register_device(dev); } dev->ibdev.phys_port_cnt = 1; dev->ibdev.num_comp_vectors = dev->num_cnq; dev->ibdev.dev.parent = &dev->pdev->dev; 
ib_set_device_ops(&dev->ibdev, &qedr_dev_ops); rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1); if (rc) return rc; dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX); return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev); } /* This function allocates fast-path status block memory */ static int qedr_alloc_mem_sb(struct qedr_dev *dev, struct qed_sb_info *sb_info, u16 sb_id) { struct status_block *sb_virt; dma_addr_t sb_phys; int rc; sb_virt = dma_alloc_coherent(&dev->pdev->dev, sizeof(*sb_virt), &sb_phys, GFP_KERNEL); if (!sb_virt) return -ENOMEM; rc = dev->ops->common->sb_init(dev->cdev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_CNQ); if (rc) { pr_err("Status block initialization failed\n"); dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); return rc; } return 0; } static void qedr_free_mem_sb(struct qedr_dev *dev, struct qed_sb_info *sb_info, int sb_id) { if (sb_info->sb_virt) { dev->ops->common->sb_release(dev->cdev, sb_info, sb_id, QED_SB_TYPE_CNQ); dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt), (void *)sb_info->sb_virt, sb_info->sb_phys); } } static void qedr_free_resources(struct qedr_dev *dev) { int i; if (IS_IWARP(dev)) destroy_workqueue(dev->iwarp_wq); for (i = 0; i < dev->num_cnq; i++) { qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); } kfree(dev->cnq_array); kfree(dev->sb_array); kfree(dev->sgid_tbl); } static int qedr_alloc_resources(struct qedr_dev *dev) { struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .intended_use = QED_CHAIN_USE_TO_CONSUME, .cnt_type = QED_CHAIN_CNT_TYPE_U16, .elem_size = sizeof(struct regpair *), }; struct qedr_cnq *cnq; __le16 *cons_pi; int i, rc; dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid), GFP_KERNEL); if (!dev->sgid_tbl) return -ENOMEM; spin_lock_init(&dev->sgid_lock); xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ); if (IS_IWARP(dev)) { xa_init(&dev->qps); dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq"); if (!dev->iwarp_wq) { rc = -ENOMEM; goto err1; } } /* Allocate Status blocks for CNQ */ dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array), GFP_KERNEL); if (!dev->sb_array) { rc = -ENOMEM; goto err_destroy_wq; } dev->cnq_array = kcalloc(dev->num_cnq, sizeof(*dev->cnq_array), GFP_KERNEL); if (!dev->cnq_array) { rc = -ENOMEM; goto err2; } dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev); /* Allocate CNQ PBLs */ params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE); for (i = 0; i < dev->num_cnq; i++) { cnq = &dev->cnq_array[i]; rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); if (rc) goto err3; rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl, &params); if (rc) goto err4; cnq->dev = dev; cnq->sb = &dev->sb_array[i]; cons_pi = dev->sb_array[i].sb_virt->pi_array; cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX]; cnq->index = i; sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev)); DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n", i, qed_chain_get_cons_idx(&cnq->pbl)); } return 0; err4: qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); err3: for (--i; i >= 0; i--) { dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); } kfree(dev->cnq_array); err2: kfree(dev->sb_array); err_destroy_wq: if (IS_IWARP(dev)) destroy_workqueue(dev->iwarp_wq); err1: kfree(dev->sgid_tbl); return rc; } static void qedr_pci_set_atomic(struct qedr_dev 
*dev, struct pci_dev *pdev) { int rc = pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64); if (rc) { dev->atomic_cap = IB_ATOMIC_NONE; DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n"); } else { dev->atomic_cap = IB_ATOMIC_GLOB; DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n"); } } static const struct qed_rdma_ops *qed_ops; #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) static irqreturn_t qedr_irq_handler(int irq, void *handle) { u16 hw_comp_cons, sw_comp_cons; struct qedr_cnq *cnq = handle; struct regpair *cq_handle; struct qedr_cq *cq; qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0); qed_sb_update_sb_idx(cnq->sb); hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr); sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl); /* Align protocol-index and chain reads */ rmb(); while (sw_comp_cons != hw_comp_cons) { cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl); cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi, cq_handle->lo); if (cq == NULL) { DP_ERR(cnq->dev, "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n", cq_handle->hi, cq_handle->lo, sw_comp_cons, hw_comp_cons); break; } if (cq->sig != QEDR_CQ_MAGIC_NUMBER) { DP_ERR(cnq->dev, "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n", cq_handle->hi, cq_handle->lo, cq); break; } cq->arm_flags = 0; if (!cq->destroyed && cq->ibcq.comp_handler) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); /* The CQ's CNQ notification counter is checked before * destroying the CQ in a busy-wait loop that waits for all of * the CQ's CNQ interrupts to be processed. It is increased * here, only after the completion handler, to ensure that * the handler is not running when the CQ is destroyed. */ cq->cnq_notif++; sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl); cnq->n_comp++; } qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index, sw_comp_cons); qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1); return IRQ_HANDLED; } static void qedr_sync_free_irqs(struct qedr_dev *dev) { u32 vector; u16 idx; int i; for (i = 0; i < dev->int_info.used_cnt; i++) { if (dev->int_info.msix_cnt) { idx = i * dev->num_hwfns + dev->affin_hwfn_idx; vector = dev->int_info.msix[idx].vector; free_irq(vector, &dev->cnq_array[i]); } } dev->int_info.used_cnt = 0; } static int qedr_req_msix_irqs(struct qedr_dev *dev) { int i, rc = 0; u16 idx; if (dev->num_cnq > dev->int_info.msix_cnt) { DP_ERR(dev, "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n", dev->num_cnq, dev->int_info.msix_cnt); return -EINVAL; } for (i = 0; i < dev->num_cnq; i++) { idx = i * dev->num_hwfns + dev->affin_hwfn_idx; rc = request_irq(dev->int_info.msix[idx].vector, qedr_irq_handler, 0, dev->cnq_array[i].name, &dev->cnq_array[i]); if (rc) { DP_ERR(dev, "Request cnq %d irq failed\n", i); qedr_sync_free_irqs(dev); } else { DP_DEBUG(dev, QEDR_MSG_INIT, "Requested cnq irq for %s [entry %d]. 
Cookie is at %p\n", dev->cnq_array[i].name, i, &dev->cnq_array[i]); dev->int_info.used_cnt++; } } return rc; } static int qedr_setup_irqs(struct qedr_dev *dev) { int rc; DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n"); /* Learn Interrupt configuration */ rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq); if (rc < 0) return rc; rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info); if (rc) { DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n"); return rc; } if (dev->int_info.msix_cnt) { DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n", dev->int_info.msix_cnt); rc = qedr_req_msix_irqs(dev); if (rc) return rc; } DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n"); return 0; } static int qedr_set_device_attr(struct qedr_dev *dev) { struct qed_rdma_device *qed_attr; struct qedr_device_attr *attr; u32 page_size; /* Part 1 - query core capabilities */ qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); /* Part 2 - check capabilities */ page_size = ~qed_attr->page_size_caps + 1; if (page_size > PAGE_SIZE) { DP_ERR(dev, "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n", PAGE_SIZE, page_size); return -ENODEV; } /* Part 3 - copy and update capabilities */ attr = &dev->attr; attr->vendor_id = qed_attr->vendor_id; attr->vendor_part_id = qed_attr->vendor_part_id; attr->hw_ver = qed_attr->hw_ver; attr->fw_ver = qed_attr->fw_ver; attr->node_guid = qed_attr->node_guid; attr->sys_image_guid = qed_attr->sys_image_guid; attr->max_cnq = qed_attr->max_cnq; attr->max_sge = qed_attr->max_sge; attr->max_inline = qed_attr->max_inline; attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE); attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE); attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc; attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc; attr->max_dev_resp_rd_atomic_resc = qed_attr->max_dev_resp_rd_atomic_resc; attr->max_cq = qed_attr->max_cq; attr->max_qp = qed_attr->max_qp; attr->max_mr = qed_attr->max_mr; attr->max_mr_size = qed_attr->max_mr_size; attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES); attr->max_mw = qed_attr->max_mw; attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl; attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size; attr->max_pd = qed_attr->max_pd; attr->max_ah = qed_attr->max_ah; attr->max_pkey = qed_attr->max_pkey; attr->max_srq = qed_attr->max_srq; attr->max_srq_wr = qed_attr->max_srq_wr; attr->dev_caps = qed_attr->dev_caps; attr->page_size_caps = qed_attr->page_size_caps; attr->dev_ack_delay = qed_attr->dev_ack_delay; attr->reserved_lkey = qed_attr->reserved_lkey; attr->bad_pkey_counter = qed_attr->bad_pkey_counter; attr->max_stats_queues = qed_attr->max_stats_queues; return 0; } static void qedr_unaffiliated_event(void *context, u8 event_code) { pr_err("unaffiliated event not implemented yet\n"); } static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle) { #define EVENT_TYPE_NOT_DEFINED 0 #define EVENT_TYPE_CQ 1 #define EVENT_TYPE_QP 2 #define EVENT_TYPE_SRQ 3 struct qedr_dev *dev = (struct qedr_dev *)context; struct regpair *async_handle = (struct regpair *)fw_handle; u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo; u8 event_type = EVENT_TYPE_NOT_DEFINED; struct ib_event event; struct ib_srq *ibsrq; struct qedr_srq *srq; unsigned long flags; struct ib_cq *ibcq; struct ib_qp *ibqp; struct qedr_cq *cq; struct qedr_qp *qp; u16 srq_id; if (IS_ROCE(dev)) { switch (e_code) { case 
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR: event.event = IB_EVENT_CQ_ERR; event_type = EVENT_TYPE_CQ; break; case ROCE_ASYNC_EVENT_SQ_DRAINED: event.event = IB_EVENT_SQ_DRAINED; event_type = EVENT_TYPE_QP; break; case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR: event.event = IB_EVENT_QP_FATAL; event_type = EVENT_TYPE_QP; break; case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR: event.event = IB_EVENT_QP_REQ_ERR; event_type = EVENT_TYPE_QP; break; case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR: event.event = IB_EVENT_QP_ACCESS_ERR; event_type = EVENT_TYPE_QP; break; case ROCE_ASYNC_EVENT_SRQ_LIMIT: event.event = IB_EVENT_SRQ_LIMIT_REACHED; event_type = EVENT_TYPE_SRQ; break; case ROCE_ASYNC_EVENT_SRQ_EMPTY: event.event = IB_EVENT_SRQ_ERR; event_type = EVENT_TYPE_SRQ; break; case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR: event.event = IB_EVENT_QP_ACCESS_ERR; event_type = EVENT_TYPE_QP; break; case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR: event.event = IB_EVENT_QP_ACCESS_ERR; event_type = EVENT_TYPE_QP; break; case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR: event.event = IB_EVENT_CQ_ERR; event_type = EVENT_TYPE_CQ; break; default: DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code, roce_handle64); } } else { switch (e_code) { case QED_IWARP_EVENT_SRQ_LIMIT: event.event = IB_EVENT_SRQ_LIMIT_REACHED; event_type = EVENT_TYPE_SRQ; break; case QED_IWARP_EVENT_SRQ_EMPTY: event.event = IB_EVENT_SRQ_ERR; event_type = EVENT_TYPE_SRQ; break; default: DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code, roce_handle64); } } switch (event_type) { case EVENT_TYPE_CQ: cq = (struct qedr_cq *)(uintptr_t)roce_handle64; if (cq) { ibcq = &cq->ibcq; if (ibcq->event_handler) { event.device = ibcq->device; event.element.cq = ibcq; ibcq->event_handler(&event, ibcq->cq_context); } } else { WARN(1, "Error: CQ event with NULL pointer ibcq. Handle=%llx\n", roce_handle64); } DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq); break; case EVENT_TYPE_QP: qp = (struct qedr_qp *)(uintptr_t)roce_handle64; if (qp) { ibqp = &qp->ibqp; if (ibqp->event_handler) { event.device = ibqp->device; event.element.qp = ibqp; ibqp->event_handler(&event, ibqp->qp_context); } } else { WARN(1, "Error: QP event with NULL pointer ibqp. Handle=%llx\n", roce_handle64); } DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp); break; case EVENT_TYPE_SRQ: srq_id = (u16)roce_handle64; xa_lock_irqsave(&dev->srqs, flags); srq = xa_load(&dev->srqs, srq_id); if (srq) { ibsrq = &srq->ibsrq; if (ibsrq->event_handler) { event.device = ibsrq->device; event.element.srq = ibsrq; ibsrq->event_handler(&event, ibsrq->srq_context); } } else { DP_NOTICE(dev, "SRQ event with NULL pointer ibsrq. 
Handle=%llx\n", roce_handle64); } xa_unlock_irqrestore(&dev->srqs, flags); DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq); break; default: break; } } static int qedr_init_hw(struct qedr_dev *dev) { struct qed_rdma_add_user_out_params out_params; struct qed_rdma_start_in_params *in_params; struct qed_rdma_cnq_params *cur_pbl; struct qed_rdma_events events; dma_addr_t p_phys_table; u32 page_cnt; int rc = 0; int i; in_params = kzalloc(sizeof(*in_params), GFP_KERNEL); if (!in_params) { rc = -ENOMEM; goto out; } in_params->desired_cnq = dev->num_cnq; for (i = 0; i < dev->num_cnq; i++) { cur_pbl = &in_params->cnq_pbl_list[i]; page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl); cur_pbl->num_pbl_pages = page_cnt; p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl); cur_pbl->pbl_ptr = (u64)p_phys_table; } events.affiliated_event = qedr_affiliated_event; events.unaffiliated_event = qedr_unaffiliated_event; events.context = dev; in_params->events = &events; in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS; in_params->max_mtu = dev->ndev->mtu; dev->iwarp_max_mtu = dev->ndev->mtu; ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr); rc = dev->ops->rdma_init(dev->cdev, in_params); if (rc) goto out; rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params); if (rc) goto out; dev->db_addr = out_params.dpi_addr; dev->db_phys_addr = out_params.dpi_phys_addr; dev->db_size = out_params.dpi_size; dev->dpi = out_params.dpi; rc = qedr_set_device_attr(dev); out: kfree(in_params); if (rc) DP_ERR(dev, "Init HW Failed rc = %d\n", rc); return rc; } static void qedr_stop_hw(struct qedr_dev *dev) { dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi); dev->ops->rdma_stop(dev->rdma_ctx); } static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, struct net_device *ndev) { struct qed_dev_rdma_info dev_info; struct qedr_dev *dev; int rc = 0; dev = ib_alloc_device(qedr_dev, ibdev); if (!dev) { pr_err("Unable to allocate ib device\n"); return NULL; } DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n"); dev->pdev = pdev; dev->ndev = ndev; dev->cdev = cdev; qed_ops = qed_get_rdma_ops(); if (!qed_ops) { DP_ERR(dev, "Failed to get qed roce operations\n"); goto init_err; } dev->ops = qed_ops; rc = qed_ops->fill_dev_info(cdev, &dev_info); if (rc) goto init_err; dev->user_dpm_enabled = dev_info.user_dpm_enabled; dev->rdma_type = dev_info.rdma_type; dev->num_hwfns = dev_info.common.num_hwfns; if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) { rc = dev->ops->iwarp_set_engine_affin(cdev, false); if (rc) { DP_ERR(dev, "iWARP is disabled over a 100g device Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n"); goto init_err; } } dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev); dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev); dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); if (!dev->num_cnq) { DP_ERR(dev, "Failed. 
At least one CNQ is required.\n"); rc = -ENOMEM; goto init_err; } dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT; qedr_pci_set_atomic(dev, pdev); rc = qedr_alloc_resources(dev); if (rc) goto init_err; rc = qedr_init_hw(dev); if (rc) goto alloc_err; rc = qedr_setup_irqs(dev); if (rc) goto irq_err; rc = qedr_register_device(dev); if (rc) { DP_ERR(dev, "Unable to allocate register device\n"); goto reg_err; } if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); return dev; reg_err: qedr_sync_free_irqs(dev); irq_err: qedr_stop_hw(dev); alloc_err: qedr_free_resources(dev); init_err: ib_dealloc_device(&dev->ibdev); DP_ERR(dev, "qedr driver load failed rc=%d\n", rc); return NULL; } static void qedr_remove(struct qedr_dev *dev) { /* First unregister with stack to stop all the active traffic * of the registered clients. */ ib_unregister_device(&dev->ibdev); qedr_stop_hw(dev); qedr_sync_free_irqs(dev); qedr_free_resources(dev); if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) dev->ops->iwarp_set_engine_affin(dev->cdev, true); ib_dealloc_device(&dev->ibdev); } static void qedr_close(struct qedr_dev *dev) { if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR); } static void qedr_shutdown(struct qedr_dev *dev) { qedr_close(dev); qedr_remove(dev); } static void qedr_open(struct qedr_dev *dev) { if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); } static void qedr_mac_address_change(struct qedr_dev *dev) { union ib_gid *sgid = &dev->sgid_tbl[0]; u8 guid[8], mac_addr[6]; int rc; /* Update SGID */ ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr); guid[0] = mac_addr[0] ^ 2; guid[1] = mac_addr[1]; guid[2] = mac_addr[2]; guid[3] = 0xff; guid[4] = 0xfe; guid[5] = mac_addr[3]; guid[6] = mac_addr[4]; guid[7] = mac_addr[5]; sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); memcpy(&sgid->raw[8], guid, sizeof(guid)); /* Update LL2 */ rc = dev->ops->ll2_set_mac_filter(dev->cdev, dev->gsi_ll2_mac_address, dev->ndev->dev_addr); ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE); if (rc) DP_ERR(dev, "Error updating mac filter\n"); } /* event handling via NIC driver ensures that all the NIC specific * initialization done before RoCE driver notifies * event to stack. */ static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event) { switch (event) { case QEDE_UP: qedr_open(dev); break; case QEDE_DOWN: qedr_close(dev); break; case QEDE_CLOSE: qedr_shutdown(dev); break; case QEDE_CHANGE_ADDR: qedr_mac_address_change(dev); break; case QEDE_CHANGE_MTU: if (rdma_protocol_iwarp(&dev->ibdev, 1)) if (dev->ndev->mtu != dev->iwarp_max_mtu) DP_NOTICE(dev, "Mtu was changed from %d to %d. This will not take affect for iWARP until qedr is reloaded\n", dev->iwarp_max_mtu, dev->ndev->mtu); break; default: pr_err("Event not supported\n"); } } static struct qedr_driver qedr_drv = { .name = "qedr_driver", .add = qedr_add, .remove = qedr_remove, .notify = qedr_notify, }; static int __init qedr_init_module(void) { return qede_rdma_register_driver(&qedr_drv); } static void __exit qedr_exit_module(void) { qede_rdma_unregister_driver(&qedr_drv); } module_init(qedr_init_module); module_exit(qedr_exit_module);
linux-master
drivers/infiniband/hw/qedr/main.c
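qedr_mac_address_change() above refreshes sgid[0] by turning the port MAC into a modified EUI-64 (flip the universal/local bit of the first octet and insert the fixed ff:fe filler in the middle) and placing it after the fe80::/64 link-local prefix. Below is a standalone sketch of just that conversion; mac_to_eui64() is a hypothetical helper name, only the byte layout is taken from the driver.

/*
 * Sketch of the MAC -> modified EUI-64 conversion used by
 * qedr_mac_address_change() to rebuild sgid[0].
 */
#include <stdint.h>
#include <stdio.h>

static void mac_to_eui64(const uint8_t mac[6], uint8_t guid[8])
{
    guid[0] = mac[0] ^ 2;  /* flip the universal/local bit */
    guid[1] = mac[1];
    guid[2] = mac[2];
    guid[3] = 0xff;        /* fixed ff:fe filler */
    guid[4] = 0xfe;
    guid[5] = mac[3];
    guid[6] = mac[4];
    guid[7] = mac[5];
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };
    uint8_t guid[8];
    int i;

    mac_to_eui64(mac, guid);

    /* The driver stores this after the fe80::/64 prefix in sgid->raw[8..15]. */
    printf("fe80::");
    for (i = 0; i < 8; i += 2)
        printf("%02x%02x%s", guid[i], guid[i + 1], i < 6 ? ":" : "\n");
    return 0;
}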
/* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <net/ip.h> #include <net/ipv6.h> #include <net/udp.h> #include <net/addrconf.h> #include <net/route.h> #include <net/ip6_route.h> #include <net/flow.h> #include "qedr.h" #include "qedr_iw_cm.h" static inline void qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info, struct iw_cm_event *event) { struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr; struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr; laddr->sin_family = AF_INET; raddr->sin_family = AF_INET; laddr->sin_port = htons(cm_info->local_port); raddr->sin_port = htons(cm_info->remote_port); laddr->sin_addr.s_addr = htonl(cm_info->local_ip[0]); raddr->sin_addr.s_addr = htonl(cm_info->remote_ip[0]); } static inline void qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info, struct iw_cm_event *event) { struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr; struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr; int i; laddr6->sin6_family = AF_INET6; raddr6->sin6_family = AF_INET6; laddr6->sin6_port = htons(cm_info->local_port); raddr6->sin6_port = htons(cm_info->remote_port); for (i = 0; i < 4; i++) { laddr6->sin6_addr.in6_u.u6_addr32[i] = htonl(cm_info->local_ip[i]); raddr6->sin6_addr.in6_u.u6_addr32[i] = htonl(cm_info->remote_ip[i]); } } static void qedr_iw_free_qp(struct kref *ref) { struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt); complete(&qp->qp_rel_comp); } static void qedr_iw_free_ep(struct kref *ref) { struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt); if (ep->qp) kref_put(&ep->qp->refcnt, qedr_iw_free_qp); if (ep->cm_id) ep->cm_id->rem_ref(ep->cm_id); kfree(ep); } static void qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params) { struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context; struct qedr_dev *dev = listener->dev; struct iw_cm_event event; struct qedr_iw_ep *ep; ep = kzalloc(sizeof(*ep), GFP_ATOMIC); if (!ep) return; ep->dev = dev; ep->qed_context = params->ep_context; kref_init(&ep->refcnt); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CONNECT_REQUEST; event.status = params->status; if (!IS_ENABLED(CONFIG_IPV6) || 
params->cm_info->ip_version == QED_TCP_IPV4) qedr_fill_sockaddr4(params->cm_info, &event); else qedr_fill_sockaddr6(params->cm_info, &event); event.provider_data = (void *)ep; event.private_data = (void *)params->cm_info->private_data; event.private_data_len = (u8)params->cm_info->private_data_len; event.ord = params->cm_info->ord; event.ird = params->cm_info->ird; listener->cm_id->event_handler(listener->cm_id, &event); } static void qedr_iw_issue_event(void *context, struct qed_iwarp_cm_event_params *params, enum iw_cm_event_type event_type) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; struct iw_cm_event event; memset(&event, 0, sizeof(event)); event.status = params->status; event.event = event_type; if (params->cm_info) { event.ird = params->cm_info->ird; event.ord = params->cm_info->ord; /* Only connect_request and reply have valid private data * the rest of the events this may be left overs from * connection establishment. CONNECT_REQUEST is issued via * qedr_iw_mpa_request */ if (event_type == IW_CM_EVENT_CONNECT_REPLY) { event.private_data_len = params->cm_info->private_data_len; event.private_data = (void *)params->cm_info->private_data; } } if (ep->cm_id) ep->cm_id->event_handler(ep->cm_id, &event); } static void qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; if (ep->cm_id) qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE); kref_put(&ep->refcnt, qedr_iw_free_ep); } static void qedr_iw_qp_event(void *context, struct qed_iwarp_cm_event_params *params, enum ib_event_type ib_event, char *str) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; struct qedr_dev *dev = ep->dev; struct ib_qp *ibqp = &ep->qp->ibqp; struct ib_event event; DP_NOTICE(dev, "QP error received: %s\n", str); if (ibqp->event_handler) { event.event = ib_event; event.device = ibqp->device; event.element.qp = ibqp; ibqp->event_handler(&event, ibqp->qp_context); } } struct qedr_discon_work { struct work_struct work; struct qedr_iw_ep *ep; enum qed_iwarp_event_type event; int status; }; static void qedr_iw_disconnect_worker(struct work_struct *work) { struct qedr_discon_work *dwork = container_of(work, struct qedr_discon_work, work); struct qed_rdma_modify_qp_in_params qp_params = { 0 }; struct qedr_iw_ep *ep = dwork->ep; struct qedr_dev *dev = ep->dev; struct qedr_qp *qp = ep->qp; struct iw_cm_event event; /* The qp won't be released until we release the ep. * the ep's refcnt was increased before calling this * function, therefore it is safe to access qp */ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT, &qp->iwarp_cm_flags)) goto out; memset(&event, 0, sizeof(event)); event.status = dwork->status; event.event = IW_CM_EVENT_DISCONNECT; /* Success means graceful disconnect was requested. modifying * to SQD is translated to graceful disconnect. 
O/w reset is sent */ if (dwork->status) qp_params.new_state = QED_ROCE_QP_STATE_ERR; else qp_params.new_state = QED_ROCE_QP_STATE_SQD; if (ep->cm_id) ep->cm_id->event_handler(ep->cm_id, &event); SET_FIELD(qp_params.modify_flags, QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1); dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params); complete(&ep->qp->iwarp_cm_comp); out: kfree(dwork); kref_put(&ep->refcnt, qedr_iw_free_ep); } static void qedr_iw_disconnect_event(void *context, struct qed_iwarp_cm_event_params *params) { struct qedr_discon_work *work; struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; struct qedr_dev *dev = ep->dev; work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) return; /* We can't get a close event before disconnect, but since * we're scheduling a work queue we need to make sure close * won't delete the ep, so we increase the refcnt */ kref_get(&ep->refcnt); work->ep = ep; work->event = params->event; work->status = params->status; INIT_WORK(&work->work, qedr_iw_disconnect_worker); queue_work(dev->iwarp_wq, &work->work); } static void qedr_iw_passive_complete(void *context, struct qed_iwarp_cm_event_params *params) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; struct qedr_dev *dev = ep->dev; /* We will only reach the following state if MPA_REJECT was called on * passive. In this case there will be no associated QP. */ if ((params->status == -ECONNREFUSED) && (!ep->qp)) { DP_DEBUG(dev, QEDR_MSG_IWARP, "PASSIVE connection refused releasing ep...\n"); kref_put(&ep->refcnt, qedr_iw_free_ep); return; } complete(&ep->qp->iwarp_cm_comp); qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED); if (params->status < 0) qedr_iw_close_event(context, params); } static void qedr_iw_active_complete(void *context, struct qed_iwarp_cm_event_params *params) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; complete(&ep->qp->iwarp_cm_comp); qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY); if (params->status < 0) kref_put(&ep->refcnt, qedr_iw_free_ep); } static int qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; struct qedr_dev *dev = ep->dev; struct qed_iwarp_send_rtr_in rtr_in; rtr_in.ep_context = params->ep_context; return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in); } static int qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; struct qedr_dev *dev = ep->dev; switch (params->event) { case QED_IWARP_EVENT_MPA_REQUEST: qedr_iw_mpa_request(context, params); break; case QED_IWARP_EVENT_ACTIVE_MPA_REPLY: qedr_iw_mpa_reply(context, params); break; case QED_IWARP_EVENT_PASSIVE_COMPLETE: qedr_iw_passive_complete(context, params); break; case QED_IWARP_EVENT_ACTIVE_COMPLETE: qedr_iw_active_complete(context, params); break; case QED_IWARP_EVENT_DISCONNECT: qedr_iw_disconnect_event(context, params); break; case QED_IWARP_EVENT_CLOSE: qedr_iw_close_event(context, params); break; case QED_IWARP_EVENT_RQ_EMPTY: qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL, "QED_IWARP_EVENT_RQ_EMPTY"); break; case QED_IWARP_EVENT_IRQ_FULL: qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL, "QED_IWARP_EVENT_IRQ_FULL"); break; case QED_IWARP_EVENT_LLP_TIMEOUT: qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL, "QED_IWARP_EVENT_LLP_TIMEOUT"); break; case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR: qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR, "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR"); 
break; case QED_IWARP_EVENT_CQ_OVERFLOW: qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL, "QED_IWARP_EVENT_CQ_OVERFLOW"); break; case QED_IWARP_EVENT_QP_CATASTROPHIC: qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL, "QED_IWARP_EVENT_QP_CATASTROPHIC"); break; case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR: qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR, "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR"); break; case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR: qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL, "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR"); break; case QED_IWARP_EVENT_TERMINATE_RECEIVED: DP_NOTICE(dev, "Got terminate message\n"); break; default: DP_NOTICE(dev, "Unknown event received %d\n", params->event); break; } return 0; } static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr) { struct net_device *ndev; u16 vlan_id = 0; ndev = ip_dev_find(&init_net, htonl(addr[0])); if (ndev) { vlan_id = rdma_vlan_dev_vlan_id(ndev); dev_put(ndev); } if (vlan_id == 0xffff) vlan_id = 0; return vlan_id; } static u16 qedr_iw_get_vlan_ipv6(u32 *addr) { struct net_device *ndev = NULL; struct in6_addr laddr6; u16 vlan_id = 0; int i; if (!IS_ENABLED(CONFIG_IPV6)) return vlan_id; for (i = 0; i < 4; i++) laddr6.in6_u.u6_addr32[i] = htonl(addr[i]); rcu_read_lock(); for_each_netdev_rcu(&init_net, ndev) { if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) { vlan_id = rdma_vlan_dev_vlan_id(ndev); break; } } rcu_read_unlock(); if (vlan_id == 0xffff) vlan_id = 0; return vlan_id; } static int qedr_addr4_resolve(struct qedr_dev *dev, struct sockaddr_in *src_in, struct sockaddr_in *dst_in, u8 *dst_mac) { __be32 src_ip = src_in->sin_addr.s_addr; __be32 dst_ip = dst_in->sin_addr.s_addr; struct neighbour *neigh = NULL; struct rtable *rt = NULL; int rc = 0; rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0); if (IS_ERR(rt)) { DP_ERR(dev, "ip_route_output returned error\n"); return -EINVAL; } neigh = dst_neigh_lookup(&rt->dst, &dst_ip); if (neigh) { rcu_read_lock(); if (neigh->nud_state & NUD_VALID) { ether_addr_copy(dst_mac, neigh->ha); DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac); } else { neigh_event_send(neigh, NULL); } rcu_read_unlock(); neigh_release(neigh); } ip_rt_put(rt); return rc; } static int qedr_addr6_resolve(struct qedr_dev *dev, struct sockaddr_in6 *src_in, struct sockaddr_in6 *dst_in, u8 *dst_mac) { struct neighbour *neigh = NULL; struct dst_entry *dst; struct flowi6 fl6; int rc = 0; memset(&fl6, 0, sizeof(fl6)); fl6.daddr = dst_in->sin6_addr; fl6.saddr = src_in->sin6_addr; dst = ip6_route_output(&init_net, NULL, &fl6); if ((!dst) || dst->error) { if (dst) { DP_ERR(dev, "ip6_route_output returned dst->error = %d\n", dst->error); dst_release(dst); } return -EINVAL; } neigh = dst_neigh_lookup(dst, &fl6.daddr); if (neigh) { rcu_read_lock(); if (neigh->nud_state & NUD_VALID) { ether_addr_copy(dst_mac, neigh->ha); DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac); } else { neigh_event_send(neigh, NULL); } rcu_read_unlock(); neigh_release(neigh); } dst_release(dst); return rc; } static struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn) { struct qedr_qp *qp; xa_lock(&dev->qps); qp = xa_load(&dev->qps, qpn); if (qp) kref_get(&qp->refcnt); xa_unlock(&dev->qps); return qp; } int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct qedr_dev *dev = get_qedr_dev(cm_id->device); struct qed_iwarp_connect_out out_params; struct qed_iwarp_connect_in in_params; struct qed_iwarp_cm_info *cm_info; struct sockaddr_in6 *laddr6; struct sockaddr_in6 
*raddr6; struct sockaddr_in *laddr; struct sockaddr_in *raddr; struct qedr_iw_ep *ep; struct qedr_qp *qp; int rc = 0; int i; laddr = (struct sockaddr_in *)&cm_id->m_local_addr; raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n", ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port), ntohs(raddr->sin_port)); DP_DEBUG(dev, QEDR_MSG_IWARP, "Connect source address: %pISpc, remote address: %pISpc\n", &cm_id->local_addr, &cm_id->remote_addr); if (!laddr->sin_port || !raddr->sin_port) return -EINVAL; ep = kzalloc(sizeof(*ep), GFP_KERNEL); if (!ep) return -ENOMEM; ep->dev = dev; kref_init(&ep->refcnt); qp = qedr_iw_load_qp(dev, conn_param->qpn); if (!qp) { rc = -EINVAL; goto err; } ep->qp = qp; cm_id->add_ref(cm_id); ep->cm_id = cm_id; in_params.event_cb = qedr_iw_event_handler; in_params.cb_context = ep; cm_info = &in_params.cm_info; memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip)); memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip)); if (!IS_ENABLED(CONFIG_IPV6) || cm_id->remote_addr.ss_family == AF_INET) { cm_info->ip_version = QED_TCP_IPV4; cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr); cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr); cm_info->remote_port = ntohs(raddr->sin_port); cm_info->local_port = ntohs(laddr->sin_port); cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip); rc = qedr_addr4_resolve(dev, laddr, raddr, (u8 *)in_params.remote_mac_addr); in_params.mss = dev->iwarp_max_mtu - (sizeof(struct iphdr) + sizeof(struct tcphdr)); } else { in_params.cm_info.ip_version = QED_TCP_IPV6; for (i = 0; i < 4; i++) { cm_info->remote_ip[i] = ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]); cm_info->local_ip[i] = ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]); } cm_info->local_port = ntohs(laddr6->sin6_port); cm_info->remote_port = ntohs(raddr6->sin6_port); in_params.mss = dev->iwarp_max_mtu - (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)); cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip); rc = qedr_addr6_resolve(dev, laddr6, raddr6, (u8 *)in_params.remote_mac_addr); } if (rc) goto err; DP_DEBUG(dev, QEDR_MSG_IWARP, "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n", conn_param->ord, conn_param->ird, conn_param->private_data, conn_param->private_data_len, qp->rq_psn); cm_info->ord = conn_param->ord; cm_info->ird = conn_param->ird; cm_info->private_data = conn_param->private_data; cm_info->private_data_len = conn_param->private_data_len; in_params.qp = qp->qed_qp; memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN); if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, &qp->iwarp_cm_flags)) { rc = -ENODEV; goto err; /* QP already being destroyed */ } rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params); if (rc) { complete(&qp->iwarp_cm_comp); goto err; } return rc; err: kref_put(&ep->refcnt, qedr_iw_free_ep); return rc; } int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog) { struct qedr_dev *dev = get_qedr_dev(cm_id->device); struct qedr_iw_listener *listener; struct qed_iwarp_listen_in iparams; struct qed_iwarp_listen_out oparams; struct sockaddr_in *laddr; struct sockaddr_in6 *laddr6; int rc; int i; laddr = (struct sockaddr_in *)&cm_id->m_local_addr; laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; DP_DEBUG(dev, QEDR_MSG_IWARP, "Create Listener address: %pISpc\n", &cm_id->local_addr); listener = kzalloc(sizeof(*listener), GFP_KERNEL); if 
(!listener) return -ENOMEM; listener->dev = dev; cm_id->add_ref(cm_id); listener->cm_id = cm_id; listener->backlog = backlog; iparams.cb_context = listener; iparams.event_cb = qedr_iw_event_handler; iparams.max_backlog = backlog; if (!IS_ENABLED(CONFIG_IPV6) || cm_id->local_addr.ss_family == AF_INET) { iparams.ip_version = QED_TCP_IPV4; memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr)); iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr); iparams.port = ntohs(laddr->sin_port); iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr); } else { iparams.ip_version = QED_TCP_IPV6; for (i = 0; i < 4; i++) { iparams.ip_addr[i] = ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]); } iparams.port = ntohs(laddr6->sin6_port); iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr); } rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams); if (rc) goto err; listener->qed_handle = oparams.handle; cm_id->provider_data = listener; return rc; err: cm_id->rem_ref(cm_id); kfree(listener); return rc; } int qedr_iw_destroy_listen(struct iw_cm_id *cm_id) { struct qedr_iw_listener *listener = cm_id->provider_data; struct qedr_dev *dev = get_qedr_dev(cm_id->device); int rc = 0; if (listener->qed_handle) rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx, listener->qed_handle); cm_id->rem_ref(cm_id); kfree(listener); return rc; } int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data; struct qedr_dev *dev = ep->dev; struct qedr_qp *qp; struct qed_iwarp_accept_in params; int rc; DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn); qp = qedr_iw_load_qp(dev, conn_param->qpn); if (!qp) { DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn); return -EINVAL; } ep->qp = qp; cm_id->add_ref(cm_id); ep->cm_id = cm_id; params.ep_context = ep->qed_context; params.cb_context = ep; params.qp = ep->qp->qed_qp; params.private_data = conn_param->private_data; params.private_data_len = conn_param->private_data_len; params.ird = conn_param->ird; params.ord = conn_param->ord; if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, &qp->iwarp_cm_flags)) { rc = -EINVAL; goto err; /* QP already destroyed */ } rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params); if (rc) { complete(&qp->iwarp_cm_comp); goto err; } return rc; err: kref_put(&ep->refcnt, qedr_iw_free_ep); return rc; } int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data; struct qedr_dev *dev = ep->dev; struct qed_iwarp_reject_in params; params.ep_context = ep->qed_context; params.cb_context = ep; params.private_data = pdata; params.private_data_len = pdata_len; ep->qp = NULL; return dev->ops->iwarp_reject(dev->rdma_ctx, &params); } void qedr_iw_qp_add_ref(struct ib_qp *ibqp) { struct qedr_qp *qp = get_qedr_qp(ibqp); kref_get(&qp->refcnt); } void qedr_iw_qp_rem_ref(struct ib_qp *ibqp) { struct qedr_qp *qp = get_qedr_qp(ibqp); kref_put(&qp->refcnt, qedr_iw_free_qp); } struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn) { struct qedr_dev *dev = get_qedr_dev(ibdev); return xa_load(&dev->qps, qpn); }
linux-master
drivers/infiniband/hw/qedr/qedr_iw_cm.c
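Editor's note: the qedr_iw_cm.c row above ends with qedr_iw_load_qp() and qedr_iw_qp_rem_ref(), which pair an xa_load() lookup performed under xa_lock() with kref_get()/kref_put() so that a QP found in the table cannot be freed while a connection-manager path is still using it. The standalone C sketch below illustrates only that lookup-plus-refcount pattern in userspace terms; every name in it (toy_qp, qp_table, toy_qp_load, toy_qp_put) is invented for the example, and it is not part of the qedr driver or of any kernel API.

/*
 * Illustrative userspace sketch (not kernel code): look the object up and
 * take a reference inside the same critical section, and free it only when
 * the last reference is dropped.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_qp {
	int qpn;
	atomic_int refcnt;		/* plays the role of struct kref */
};

static struct toy_qp *qp_table[16];	/* plays the role of the xarray */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void toy_qp_put(struct toy_qp *qp)
{
	/* Last reference frees the object, like kref_put() with a release cb. */
	if (atomic_fetch_sub(&qp->refcnt, 1) == 1)
		free(qp);
}

static struct toy_qp *toy_qp_load(int qpn)
{
	struct toy_qp *qp;

	/* Take the reference under the same lock as the lookup, so the QP
	 * cannot be destroyed between finding it and pinning it.
	 */
	pthread_mutex_lock(&table_lock);
	qp = qp_table[qpn & 15];
	if (qp)
		atomic_fetch_add(&qp->refcnt, 1);
	pthread_mutex_unlock(&table_lock);
	return qp;
}

int main(void)
{
	struct toy_qp *qp = calloc(1, sizeof(*qp)), *found;

	if (!qp)
		return 1;
	qp->qpn = 3;
	atomic_init(&qp->refcnt, 1);	/* creator's reference */
	qp_table[3] = qp;

	found = toy_qp_load(3);		/* e.g. a connect/accept path */
	printf("qpn %d refcnt %d\n", found->qpn, atomic_load(&found->refcnt));

	qp_table[3] = NULL;		/* "destroy": drop the table's reference */
	toy_qp_put(qp);
	toy_qp_put(found);		/* CM path drops its reference last */
	return 0;
}

Taking the reference inside the same critical section as the lookup is the point of the pattern: a concurrent removal of the table entry can no longer race with a user that has already found the object.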
/* QLogic qedr NIC Driver * Copyright (c) 2015-2016 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/dma-mapping.h> #include <linux/crc32.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/udp.h> #include <linux/iommu.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/iw_cm.h> #include <rdma/ib_umem.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> #include <rdma/uverbs_ioctl.h> #include <linux/qed/common_hsi.h> #include "qedr_hsi_rdma.h" #include <linux/qed/qed_if.h> #include "qedr.h" #include "verbs.h" #include <rdma/qedr-abi.h> #include "qedr_roce_cm.h" #include "qedr_iw_cm.h" #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm) #define RDMA_MAX_SGE_PER_SRQ (4) #define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1) #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) enum { QEDR_USER_MMAP_IO_WC = 0, QEDR_USER_MMAP_PHYS_PAGE, }; static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) { size_t min_len = min_t(size_t, len, udata->outlen); return ib_copy_to_udata(udata, src, min_len); } int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) { if (index >= QEDR_ROCE_PKEY_TABLE_LEN) return -EINVAL; *pkey = QEDR_ROCE_PKEY_DEFAULT; return 0; } int qedr_iw_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *sgid) { struct qedr_dev *dev = get_qedr_dev(ibdev); memset(sgid->raw, 0, sizeof(sgid->raw)); ether_addr_copy(sgid->raw, dev->ndev->dev_addr); DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index, sgid->global.interface_id, sgid->global.subnet_prefix); return 0; } int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) { struct qedr_dev *dev = get_qedr_dev(ibsrq->device); struct qedr_device_attr *qattr = &dev->attr; struct qedr_srq *srq = get_qedr_srq(ibsrq); srq_attr->srq_limit = srq->srq_limit; srq_attr->max_wr = qattr->max_srq_wr; srq_attr->max_sge = qattr->max_sge; return 0; } int qedr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibdev); struct qedr_device_attr *qattr = &dev->attr; if (!dev->rdma_ctx) { DP_ERR(dev, "qedr_query_device called with invalid params rdma_ctx=%p\n", dev->rdma_ctx); return -EINVAL; } 
memset(attr, 0, sizeof(*attr)); attr->fw_ver = qattr->fw_ver; attr->sys_image_guid = qattr->sys_image_guid; attr->max_mr_size = qattr->max_mr_size; attr->page_size_cap = qattr->page_size_caps; attr->vendor_id = qattr->vendor_id; attr->vendor_part_id = qattr->vendor_part_id; attr->hw_ver = qattr->hw_ver; attr->max_qp = qattr->max_qp; attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe); attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_MEM_MGT_EXTENSIONS; attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; if (!rdma_protocol_iwarp(&dev->ibdev, 1)) attr->device_cap_flags |= IB_DEVICE_XRC; attr->max_send_sge = qattr->max_sge; attr->max_recv_sge = qattr->max_sge; attr->max_sge_rd = qattr->max_sge; attr->max_cq = qattr->max_cq; attr->max_cqe = qattr->max_cqe; attr->max_mr = qattr->max_mr; attr->max_mw = qattr->max_mw; attr->max_pd = qattr->max_pd; attr->atomic_cap = dev->atomic_cap; attr->max_qp_init_rd_atom = 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1); attr->max_qp_rd_atom = min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1), attr->max_qp_init_rd_atom); attr->max_srq = qattr->max_srq; attr->max_srq_sge = qattr->max_srq_sge; attr->max_srq_wr = qattr->max_srq_wr; attr->local_ca_ack_delay = qattr->dev_ack_delay; attr->max_fast_reg_page_list_len = qattr->max_mr / 8; attr->max_pkeys = qattr->max_pkey; attr->max_ah = qattr->max_ah; return 0; } static inline void get_link_speed_and_width(int speed, u16 *ib_speed, u8 *ib_width) { switch (speed) { case 1000: *ib_speed = IB_SPEED_SDR; *ib_width = IB_WIDTH_1X; break; case 10000: *ib_speed = IB_SPEED_QDR; *ib_width = IB_WIDTH_1X; break; case 20000: *ib_speed = IB_SPEED_DDR; *ib_width = IB_WIDTH_4X; break; case 25000: *ib_speed = IB_SPEED_EDR; *ib_width = IB_WIDTH_1X; break; case 40000: *ib_speed = IB_SPEED_QDR; *ib_width = IB_WIDTH_4X; break; case 50000: *ib_speed = IB_SPEED_HDR; *ib_width = IB_WIDTH_1X; break; case 100000: *ib_speed = IB_SPEED_EDR; *ib_width = IB_WIDTH_4X; break; default: /* Unsupported */ *ib_speed = IB_SPEED_SDR; *ib_width = IB_WIDTH_1X; } } int qedr_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *attr) { struct qedr_dev *dev; struct qed_rdma_port *rdma_port; dev = get_qedr_dev(ibdev); if (!dev->rdma_ctx) { DP_ERR(dev, "rdma_ctx is NULL\n"); return -EINVAL; } rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx); /* *attr being zeroed by the caller, avoid zeroing it here */ if (rdma_port->port_state == QED_RDMA_PORT_UP) { attr->state = IB_PORT_ACTIVE; attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } else { attr->state = IB_PORT_DOWN; attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; } attr->max_mtu = IB_MTU_4096; attr->lid = 0; attr->lmc = 0; attr->sm_lid = 0; attr->sm_sl = 0; attr->ip_gids = true; if (rdma_protocol_iwarp(&dev->ibdev, 1)) { attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu); attr->gid_tbl_len = 1; } else { attr->active_mtu = iboe_get_mtu(dev->ndev->mtu); attr->gid_tbl_len = QEDR_MAX_SGID; attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN; } attr->bad_pkey_cntr = rdma_port->pkey_bad_counter; attr->qkey_viol_cntr = 0; get_link_speed_and_width(rdma_port->link_speed, &attr->active_speed, &attr->active_width); attr->max_msg_sz = rdma_port->max_msg_size; attr->max_vl_num = 4; return 0; } int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { struct ib_device *ibdev = uctx->device; int rc; struct qedr_ucontext *ctx = get_qedr_ucontext(uctx); struct qedr_alloc_ucontext_resp uresp = {}; struct qedr_alloc_ucontext_req ureq = {}; struct qedr_dev 
*dev = get_qedr_dev(ibdev); struct qed_rdma_add_user_out_params oparams; struct qedr_user_mmap_entry *entry; if (!udata) return -EFAULT; if (udata->inlen) { rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen)); if (rc) { DP_ERR(dev, "Problem copying data from user space\n"); return -EFAULT; } ctx->edpm_mode = !!(ureq.context_flags & QEDR_ALLOC_UCTX_EDPM_MODE); ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC); } rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams); if (rc) { DP_ERR(dev, "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n", rc); return rc; } ctx->dpi = oparams.dpi; ctx->dpi_addr = oparams.dpi_addr; ctx->dpi_phys_addr = oparams.dpi_phys_addr; ctx->dpi_size = oparams.dpi_size; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { rc = -ENOMEM; goto err; } entry->io_address = ctx->dpi_phys_addr; entry->length = ctx->dpi_size; entry->mmap_flag = QEDR_USER_MMAP_IO_WC; entry->dpi = ctx->dpi; entry->dev = dev; rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, ctx->dpi_size); if (rc) { kfree(entry); goto err; } ctx->db_mmap_entry = &entry->rdma_entry; if (!dev->user_dpm_enabled) uresp.dpm_flags = 0; else if (rdma_protocol_iwarp(&dev->ibdev, 1)) uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY; else uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED | QEDR_DPM_TYPE_ROCE_LEGACY | QEDR_DPM_TYPE_ROCE_EDPM_MODE; if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) { uresp.dpm_flags |= QEDR_DPM_SIZES_SET; uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE; uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE; uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE; } uresp.wids_enabled = 1; uresp.wid_count = oparams.wid_count; uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry); uresp.db_size = ctx->dpi_size; uresp.max_send_wr = dev->attr.max_sqe; uresp.max_recv_wr = dev->attr.max_rqe; uresp.max_srq_wr = dev->attr.max_srq_wr; uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE; uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE; uresp.sges_per_srq_wr = dev->attr.max_srq_sge; uresp.max_cqes = QEDR_MAX_CQES; rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) goto err; ctx->dev = dev; DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n", &ctx->ibucontext); return 0; err: if (!ctx->db_mmap_entry) dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi); else rdma_user_mmap_entry_remove(ctx->db_mmap_entry); return rc; } void qedr_dealloc_ucontext(struct ib_ucontext *ibctx) { struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx); DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n", uctx); rdma_user_mmap_entry_remove(uctx->db_mmap_entry); } void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry) { struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry); struct qedr_dev *dev = entry->dev; if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE) free_page((unsigned long)entry->address); else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC) dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi); kfree(entry); } int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma) { struct ib_device *dev = ucontext->device; size_t length = vma->vm_end - vma->vm_start; struct rdma_user_mmap_entry *rdma_entry; struct qedr_user_mmap_entry *entry; int rc = 0; u64 pfn; ibdev_dbg(dev, "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n", 
vma->vm_start, vma->vm_end, length, vma->vm_pgoff); rdma_entry = rdma_user_mmap_entry_get(ucontext, vma); if (!rdma_entry) { ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n", vma->vm_pgoff); return -EINVAL; } entry = get_qedr_mmap_entry(rdma_entry); ibdev_dbg(dev, "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n", entry->io_address, length, entry->mmap_flag); switch (entry->mmap_flag) { case QEDR_USER_MMAP_IO_WC: pfn = entry->io_address >> PAGE_SHIFT; rc = rdma_user_mmap_io(ucontext, vma, pfn, length, pgprot_writecombine(vma->vm_page_prot), rdma_entry); break; case QEDR_USER_MMAP_PHYS_PAGE: rc = vm_insert_page(vma, vma->vm_start, virt_to_page(entry->address)); break; default: rc = -EINVAL; } if (rc) ibdev_dbg(dev, "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n", entry->io_address, length, entry->mmap_flag, rc); rdma_user_mmap_entry_put(rdma_entry); return rc; } int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct qedr_dev *dev = get_qedr_dev(ibdev); struct qedr_pd *pd = get_qedr_pd(ibpd); u16 pd_id; int rc; DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n", udata ? "User Lib" : "Kernel"); if (!dev->rdma_ctx) { DP_ERR(dev, "invalid RDMA context\n"); return -EINVAL; } rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); if (rc) return rc; pd->pd_id = pd_id; if (udata) { struct qedr_alloc_pd_uresp uresp = { .pd_id = pd_id, }; struct qedr_ucontext *context = rdma_udata_to_drv_context( udata, struct qedr_ucontext, ibucontext); rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) { DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); return rc; } pd->uctx = context; pd->uctx->pd = pd; } return 0; } int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibpd->device); struct qedr_pd *pd = get_qedr_pd(ibpd); DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id); dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id); return 0; } int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibxrcd->device); struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd); return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id); } int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibxrcd->device); u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id; dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id); return 0; } static void qedr_free_pbl(struct qedr_dev *dev, struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl) { struct pci_dev *pdev = dev->pdev; int i; for (i = 0; i < pbl_info->num_pbls; i++) { if (!pbl[i].va) continue; dma_free_coherent(&pdev->dev, pbl_info->pbl_size, pbl[i].va, pbl[i].pa); } kfree(pbl); } #define MIN_FW_PBL_PAGE_SIZE (4 * 1024) #define MAX_FW_PBL_PAGE_SIZE (64 * 1024) #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64)) #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE) #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE) static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev, struct qedr_pbl_info *pbl_info, gfp_t flags) { struct pci_dev *pdev = dev->pdev; struct qedr_pbl *pbl_table; dma_addr_t *pbl_main_tbl; dma_addr_t pa; void *va; int i; pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags); if (!pbl_table) return ERR_PTR(-ENOMEM); for (i = 0; i < pbl_info->num_pbls; i++) { va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, 
&pa, flags); if (!va) goto err; pbl_table[i].va = va; pbl_table[i].pa = pa; } /* Two-Layer PBLs, if we have more than one pbl we need to initialize * the first one with physical pointers to all of the rest */ pbl_main_tbl = (dma_addr_t *)pbl_table[0].va; for (i = 0; i < pbl_info->num_pbls - 1; i++) pbl_main_tbl[i] = pbl_table[i + 1].pa; return pbl_table; err: for (i--; i >= 0; i--) dma_free_coherent(&pdev->dev, pbl_info->pbl_size, pbl_table[i].va, pbl_table[i].pa); qedr_free_pbl(dev, pbl_info, pbl_table); return ERR_PTR(-ENOMEM); } static int qedr_prepare_pbl_tbl(struct qedr_dev *dev, struct qedr_pbl_info *pbl_info, u32 num_pbes, int two_layer_capable) { u32 pbl_capacity; u32 pbl_size; u32 num_pbls; if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) { if (num_pbes > MAX_PBES_TWO_LAYER) { DP_ERR(dev, "prepare pbl table: too many pages %d\n", num_pbes); return -EINVAL; } /* calculate required pbl page size */ pbl_size = MIN_FW_PBL_PAGE_SIZE; pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) * NUM_PBES_ON_PAGE(pbl_size); while (pbl_capacity < num_pbes) { pbl_size *= 2; pbl_capacity = pbl_size / sizeof(u64); pbl_capacity = pbl_capacity * pbl_capacity; } num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size)); num_pbls++; /* One for the layer0 ( points to the pbls) */ pbl_info->two_layered = true; } else { /* One layered PBL */ num_pbls = 1; pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE, roundup_pow_of_two((num_pbes * sizeof(u64)))); pbl_info->two_layered = false; } pbl_info->num_pbls = num_pbls; pbl_info->pbl_size = pbl_size; pbl_info->num_pbes = num_pbes; DP_DEBUG(dev, QEDR_MSG_MR, "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n", pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size); return 0; } static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, struct qedr_pbl *pbl, struct qedr_pbl_info *pbl_info, u32 pg_shift) { int pbe_cnt, total_num_pbes = 0; struct qedr_pbl *pbl_tbl; struct ib_block_iter biter; struct regpair *pbe; if (!pbl_info->num_pbes) return; /* If we have a two layered pbl, the first pbl points to the rest * of the pbls and the first entry lays on the second pbl in the table */ if (pbl_info->two_layered) pbl_tbl = &pbl[1]; else pbl_tbl = pbl; pbe = (struct regpair *)pbl_tbl->va; if (!pbe) { DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n"); return; } pbe_cnt = 0; rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) { u64 pg_addr = rdma_block_iter_dma_address(&biter); pbe->lo = cpu_to_le32(pg_addr); pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); pbe_cnt++; total_num_pbes++; pbe++; if (total_num_pbes == pbl_info->num_pbes) return; /* If the given pbl is full storing the pbes, move to next pbl. */ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { pbl_tbl++; pbe = (struct regpair *)pbl_tbl->va; pbe_cnt = 0; } } } static int qedr_db_recovery_add(struct qedr_dev *dev, void __iomem *db_addr, void *db_data, enum qed_db_rec_width db_width, enum qed_db_rec_space db_space) { if (!db_data) { DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n"); return 0; } return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data, db_width, db_space); } static void qedr_db_recovery_del(struct qedr_dev *dev, void __iomem *db_addr, void *db_data) { if (!db_data) { DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n"); return; } /* Ignore return code as there is not much we can do about it. Error * log will be printed inside. 
*/ dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data); } static int qedr_copy_cq_uresp(struct qedr_dev *dev, struct qedr_cq *cq, struct ib_udata *udata, u32 db_offset) { struct qedr_create_cq_uresp uresp; int rc; memset(&uresp, 0, sizeof(uresp)); uresp.db_offset = db_offset; uresp.icid = cq->icid; if (cq->q.db_mmap_entry) uresp.db_rec_addr = rdma_user_mmap_get_offset(cq->q.db_mmap_entry); rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid); return rc; } static void consume_cqe(struct qedr_cq *cq) { if (cq->latest_cqe == cq->toggle_cqe) cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK; cq->latest_cqe = qed_chain_consume(&cq->pbl); } static inline int qedr_align_cq_entries(int entries) { u64 size, aligned_size; /* We allocate an extra entry that we don't report to the FW. */ size = (entries + 1) * QEDR_CQE_SIZE; aligned_size = ALIGN(size, PAGE_SIZE); return aligned_size / QEDR_CQE_SIZE; } static int qedr_init_user_db_rec(struct ib_udata *udata, struct qedr_dev *dev, struct qedr_userq *q, bool requires_db_rec) { struct qedr_ucontext *uctx = rdma_udata_to_drv_context(udata, struct qedr_ucontext, ibucontext); struct qedr_user_mmap_entry *entry; int rc; /* Aborting for non doorbell userqueue (SRQ) or non-supporting lib */ if (requires_db_rec == 0 || !uctx->db_rec) return 0; /* Allocate a page for doorbell recovery, add to mmap */ q->db_rec_data = (void *)get_zeroed_page(GFP_USER); if (!q->db_rec_data) { DP_ERR(dev, "get_zeroed_page failed\n"); return -ENOMEM; } entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) goto err_free_db_data; entry->address = q->db_rec_data; entry->length = PAGE_SIZE; entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE; rc = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry, PAGE_SIZE); if (rc) goto err_free_entry; q->db_mmap_entry = &entry->rdma_entry; return 0; err_free_entry: kfree(entry); err_free_db_data: free_page((unsigned long)q->db_rec_data); q->db_rec_data = NULL; return -ENOMEM; } static inline int qedr_init_user_queue(struct ib_udata *udata, struct qedr_dev *dev, struct qedr_userq *q, u64 buf_addr, size_t buf_len, bool requires_db_rec, int access, int alloc_and_init) { u32 fw_pages; int rc; q->buf_addr = buf_addr; q->buf_len = buf_len; q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access); if (IS_ERR(q->umem)) { DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n", PTR_ERR(q->umem)); return PTR_ERR(q->umem); } fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT); rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); if (rc) goto err0; if (alloc_and_init) { q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL); if (IS_ERR(q->pbl_tbl)) { rc = PTR_ERR(q->pbl_tbl); goto err0; } qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info, FW_PAGE_SHIFT); } else { q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL); if (!q->pbl_tbl) { rc = -ENOMEM; goto err0; } } /* mmap the user address used to store doorbell data for recovery */ return qedr_init_user_db_rec(udata, dev, q, requires_db_rec); err0: ib_umem_release(q->umem); q->umem = NULL; return rc; } static inline void qedr_init_cq_params(struct qedr_cq *cq, struct qedr_ucontext *ctx, struct qedr_dev *dev, int vector, int chain_entries, int page_cnt, u64 pbl_ptr, struct qed_rdma_create_cq_in_params *params) { memset(params, 0, sizeof(*params)); params->cq_handle_hi = upper_32_bits((uintptr_t)cq); params->cq_handle_lo = lower_32_bits((uintptr_t)cq); params->cnq_id = vector; 
params->cq_size = chain_entries - 1; params->dpi = (ctx) ? ctx->dpi : dev->dpi; params->pbl_num_pages = page_cnt; params->pbl_ptr = pbl_ptr; params->pbl_two_level = 0; } static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags) { cq->db.data.agg_flags = flags; cq->db.data.value = cpu_to_le32(cons); writeq(cq->db.raw, cq->db_addr); } int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct qedr_cq *cq = get_qedr_cq(ibcq); unsigned long sflags; struct qedr_dev *dev; dev = get_qedr_dev(ibcq->device); if (cq->destroyed) { DP_ERR(dev, "warning: arm was invoked after destroy for cq %p (icid=%d)\n", cq, cq->icid); return -EINVAL; } if (cq->cq_type == QEDR_CQ_TYPE_GSI) return 0; spin_lock_irqsave(&cq->cq_lock, sflags); cq->arm_flags = 0; if (flags & IB_CQ_SOLICITED) cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD; if (flags & IB_CQ_NEXT_COMP) cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD; doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags); spin_unlock_irqrestore(&cq->cq_lock, sflags); return 0; } int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct ib_device *ibdev = ibcq->device; struct qedr_ucontext *ctx = rdma_udata_to_drv_context( udata, struct qedr_ucontext, ibucontext); struct qed_rdma_destroy_cq_out_params destroy_oparams; struct qed_rdma_destroy_cq_in_params destroy_iparams; struct qed_chain_init_params chain_params = { .mode = QED_CHAIN_MODE_PBL, .intended_use = QED_CHAIN_USE_TO_CONSUME, .cnt_type = QED_CHAIN_CNT_TYPE_U32, .elem_size = sizeof(union rdma_cqe), }; struct qedr_dev *dev = get_qedr_dev(ibdev); struct qed_rdma_create_cq_in_params params; struct qedr_create_cq_ureq ureq = {}; int vector = attr->comp_vector; int entries = attr->cqe; struct qedr_cq *cq = get_qedr_cq(ibcq); int chain_entries; u32 db_offset; int page_cnt; u64 pbl_ptr; u16 icid; int rc; DP_DEBUG(dev, QEDR_MSG_INIT, "create_cq: called from %s. entries=%d, vector=%d\n", udata ? "User Lib" : "Kernel", entries, vector); if (attr->flags) return -EOPNOTSUPP; if (entries > QEDR_MAX_CQES) { DP_ERR(dev, "create cq: the number of entries %d is too high. Must be equal or below %d.\n", entries, QEDR_MAX_CQES); return -EINVAL; } chain_entries = qedr_align_cq_entries(entries); chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES); chain_params.num_elems = chain_entries; /* calc db offset. 
user will add DPI base, kernel will add db addr */ db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT); if (udata) { if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen))) { DP_ERR(dev, "create cq: problem copying data from user space\n"); goto err0; } if (!ureq.len) { DP_ERR(dev, "create cq: cannot create a cq with 0 entries\n"); goto err0; } cq->cq_type = QEDR_CQ_TYPE_USER; rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr, ureq.len, true, IB_ACCESS_LOCAL_WRITE, 1); if (rc) goto err0; pbl_ptr = cq->q.pbl_tbl->pa; page_cnt = cq->q.pbl_info.num_pbes; cq->ibcq.cqe = chain_entries; cq->q.db_addr = ctx->dpi_addr + db_offset; } else { cq->cq_type = QEDR_CQ_TYPE_KERNEL; rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl, &chain_params); if (rc) goto err0; page_cnt = qed_chain_get_page_cnt(&cq->pbl); pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl); cq->ibcq.cqe = cq->pbl.capacity; } qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt, pbl_ptr, &params); rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid); if (rc) goto err1; cq->icid = icid; cq->sig = QEDR_CQ_MAGIC_NUMBER; spin_lock_init(&cq->cq_lock); if (udata) { rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset); if (rc) goto err2; rc = qedr_db_recovery_add(dev, cq->q.db_addr, &cq->q.db_rec_data->db_data, DB_REC_WIDTH_64B, DB_REC_USER); if (rc) goto err2; } else { /* Generate doorbell address. */ cq->db.data.icid = cq->icid; cq->db_addr = dev->db_addr + db_offset; cq->db.data.params = DB_AGG_CMD_MAX << RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT; /* point to the very last element, passing it we will toggle */ cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl); cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK; cq->latest_cqe = NULL; consume_cqe(cq); cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl); rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data, DB_REC_WIDTH_64B, DB_REC_KERNEL); if (rc) goto err2; } DP_DEBUG(dev, QEDR_MSG_CQ, "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n", cq->icid, cq, params.cq_size); return 0; err2: destroy_iparams.icid = cq->icid; dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams, &destroy_oparams); err1: if (udata) { qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl); ib_umem_release(cq->q.umem); if (cq->q.db_mmap_entry) rdma_user_mmap_entry_remove(cq->q.db_mmap_entry); } else { dev->ops->common->chain_free(dev->cdev, &cq->pbl); } err0: return -EINVAL; } #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10) #define QEDR_DESTROY_CQ_ITER_DURATION (10) int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibcq->device); struct qed_rdma_destroy_cq_out_params oparams; struct qed_rdma_destroy_cq_in_params iparams; struct qedr_cq *cq = get_qedr_cq(ibcq); int iter; DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid); cq->destroyed = 1; /* GSIs CQs are handled by driver, so they don't exist in the FW */ if (cq->cq_type == QEDR_CQ_TYPE_GSI) { qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data); return 0; } iparams.icid = cq->icid; dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams); dev->ops->common->chain_free(dev->cdev, &cq->pbl); if (udata) { qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl); ib_umem_release(cq->q.umem); if (cq->q.db_rec_data) { qedr_db_recovery_del(dev, cq->q.db_addr, &cq->q.db_rec_data->db_data); rdma_user_mmap_entry_remove(cq->q.db_mmap_entry); } } else { qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data); } /* We don't want the IRQ handler to handle a non-existing 
CQ so we * wait until all CNQ interrupts, if any, are received. This will always * happen and will always happen very fast. If not, then a serious error * has occurred. That is why we can use a long delay. * We spin for a short time so we don't lose time on context switching * in case all the completions are handled in that span. Otherwise * we sleep for a while and check again. Since the CNQ may be * associated with (only) the current CPU we use msleep to allow the * current CPU to be freed. * The CNQ notification is increased in qedr_irq_handler(). */ iter = QEDR_DESTROY_CQ_MAX_ITERATIONS; while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) { udelay(QEDR_DESTROY_CQ_ITER_DURATION); iter--; } iter = QEDR_DESTROY_CQ_MAX_ITERATIONS; while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) { msleep(QEDR_DESTROY_CQ_ITER_DURATION); iter--; } /* Note that we don't need to have explicit code to wait for the * completion of the event handler because it is invoked from the EQ. * Since the destroy CQ ramrod has also been received on the EQ we can * be certain that there's no event handler in process. */ return 0; } static inline int get_gid_info_from_table(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct qed_rdma_modify_qp_in_params *qp_params) { const struct ib_gid_attr *gid_attr; enum rdma_network_type nw_type; const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); u32 ipv4_addr; int ret; int i; gid_attr = grh->sgid_attr; ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL); if (ret) return ret; nw_type = rdma_gid_attr_network_type(gid_attr); switch (nw_type) { case RDMA_NETWORK_IPV6: memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0], sizeof(qp_params->sgid)); memcpy(&qp_params->dgid.bytes[0], &grh->dgid, sizeof(qp_params->dgid)); qp_params->roce_mode = ROCE_V2_IPV6; SET_FIELD(qp_params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); break; case RDMA_NETWORK_ROCE_V1: memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0], sizeof(qp_params->sgid)); memcpy(&qp_params->dgid.bytes[0], &grh->dgid, sizeof(qp_params->dgid)); qp_params->roce_mode = ROCE_V1; break; case RDMA_NETWORK_IPV4: memset(&qp_params->sgid, 0, sizeof(qp_params->sgid)); memset(&qp_params->dgid, 0, sizeof(qp_params->dgid)); ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw); qp_params->sgid.ipv4_addr = ipv4_addr; ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw); qp_params->dgid.ipv4_addr = ipv4_addr; SET_FIELD(qp_params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); qp_params->roce_mode = ROCE_V2_IPV4; break; default: return -EINVAL; } for (i = 0; i < 4; i++) { qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]); qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]); } if (qp_params->vlan_id >= VLAN_CFI_MASK) qp_params->vlan_id = 0; return 0; } static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, struct ib_qp_init_attr *attrs, struct ib_udata *udata) { struct qedr_device_attr *qattr = &dev->attr; /* QP0...
attrs->qp_type == IB_QPT_GSI */ if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI && attrs->qp_type != IB_QPT_XRC_INI && attrs->qp_type != IB_QPT_XRC_TGT) { DP_DEBUG(dev, QEDR_MSG_QP, "create qp: unsupported qp type=0x%x requested\n", attrs->qp_type); return -EOPNOTSUPP; } if (attrs->cap.max_send_wr > qattr->max_sqe) { DP_ERR(dev, "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n", attrs->cap.max_send_wr, qattr->max_sqe); return -EINVAL; } if (attrs->cap.max_inline_data > qattr->max_inline) { DP_ERR(dev, "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n", attrs->cap.max_inline_data, qattr->max_inline); return -EINVAL; } if (attrs->cap.max_send_sge > qattr->max_sge) { DP_ERR(dev, "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n", attrs->cap.max_send_sge, qattr->max_sge); return -EINVAL; } if (attrs->cap.max_recv_sge > qattr->max_sge) { DP_ERR(dev, "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n", attrs->cap.max_recv_sge, qattr->max_sge); return -EINVAL; } /* verify consumer QPs are not trying to use GSI QP's CQ. * TGT QP isn't associated with RQ/SQ */ if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) && (attrs->qp_type != IB_QPT_XRC_TGT) && (attrs->qp_type != IB_QPT_XRC_INI)) { struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq); struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq); if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) || (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) { DP_ERR(dev, "create qp: consumer QP cannot use GSI CQs.\n"); return -EINVAL; } } return 0; } static int qedr_copy_srq_uresp(struct qedr_dev *dev, struct qedr_srq *srq, struct ib_udata *udata) { struct qedr_create_srq_uresp uresp = {}; int rc; uresp.srq_id = srq->srq_id; rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) DP_ERR(dev, "create srq: problem copying data to user space\n"); return rc; } static void qedr_copy_rq_uresp(struct qedr_dev *dev, struct qedr_create_qp_uresp *uresp, struct qedr_qp *qp) { /* iWARP requires two doorbells per RQ. 
*/ if (rdma_protocol_iwarp(&dev->ibdev, 1)) { uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD); uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS); } else { uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); } uresp->rq_icid = qp->icid; if (qp->urq.db_mmap_entry) uresp->rq_db_rec_addr = rdma_user_mmap_get_offset(qp->urq.db_mmap_entry); } static void qedr_copy_sq_uresp(struct qedr_dev *dev, struct qedr_create_qp_uresp *uresp, struct qedr_qp *qp) { uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); /* iWARP uses the same cid for rq and sq */ if (rdma_protocol_iwarp(&dev->ibdev, 1)) uresp->sq_icid = qp->icid; else uresp->sq_icid = qp->icid + 1; if (qp->usq.db_mmap_entry) uresp->sq_db_rec_addr = rdma_user_mmap_get_offset(qp->usq.db_mmap_entry); } static int qedr_copy_qp_uresp(struct qedr_dev *dev, struct qedr_qp *qp, struct ib_udata *udata, struct qedr_create_qp_uresp *uresp) { int rc; memset(uresp, 0, sizeof(*uresp)); if (qedr_qp_has_sq(qp)) qedr_copy_sq_uresp(dev, uresp, qp); if (qedr_qp_has_rq(qp)) qedr_copy_rq_uresp(dev, uresp, qp); uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE; uresp->qp_id = qp->qp_id; rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp)); if (rc) DP_ERR(dev, "create qp: failed a copy to user space with qp icid=0x%x.\n", qp->icid); return rc; } static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph) { qed_chain_reset(&qph->pbl); qph->prod = 0; qph->cons = 0; qph->wqe_cons = 0; qph->db_data.data.value = cpu_to_le16(0); } static void qedr_set_common_qp_params(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_pd *pd, struct ib_qp_init_attr *attrs) { spin_lock_init(&qp->q_lock); if (rdma_protocol_iwarp(&dev->ibdev, 1)) { kref_init(&qp->refcnt); init_completion(&qp->iwarp_cm_comp); init_completion(&qp->qp_rel_comp); } qp->pd = pd; qp->qp_type = attrs->qp_type; qp->max_inline_data = attrs->cap.max_inline_data; qp->state = QED_ROCE_QP_STATE_RESET; qp->prev_wqe_size = 0; qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; qp->dev = dev; if (qedr_qp_has_sq(qp)) { qedr_reset_qp_hwq_info(&qp->sq); qp->sq.max_sges = attrs->cap.max_send_sge; qp->sq_cq = get_qedr_cq(attrs->send_cq); DP_DEBUG(dev, QEDR_MSG_QP, "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n", qp->sq.max_sges, qp->sq_cq->icid); } if (attrs->srq) qp->srq = get_qedr_srq(attrs->srq); if (qedr_qp_has_rq(qp)) { qedr_reset_qp_hwq_info(&qp->rq); qp->rq_cq = get_qedr_cq(attrs->recv_cq); qp->rq.max_sges = attrs->cap.max_recv_sge; DP_DEBUG(dev, QEDR_MSG_QP, "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n", qp->rq.max_sges, qp->rq_cq->icid); } DP_DEBUG(dev, QEDR_MSG_QP, "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n", pd->pd_id, qp->qp_type, qp->max_inline_data, qp->state, qp->signaled, (attrs->srq) ? 
1 : 0); DP_DEBUG(dev, QEDR_MSG_QP, "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n", qp->sq.max_sges, qp->sq_cq->icid); } static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp) { int rc = 0; if (qedr_qp_has_sq(qp)) { qp->sq.db = dev->db_addr + DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); qp->sq.db_data.data.icid = qp->icid + 1; rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data, DB_REC_WIDTH_32B, DB_REC_KERNEL); if (rc) return rc; } if (qedr_qp_has_rq(qp)) { qp->rq.db = dev->db_addr + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); qp->rq.db_data.data.icid = qp->icid; rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data, DB_REC_WIDTH_32B, DB_REC_KERNEL); if (rc && qedr_qp_has_sq(qp)) qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data); } return rc; } static int qedr_check_srq_params(struct qedr_dev *dev, struct ib_srq_init_attr *attrs, struct ib_udata *udata) { struct qedr_device_attr *qattr = &dev->attr; if (attrs->attr.max_wr > qattr->max_srq_wr) { DP_ERR(dev, "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n", attrs->attr.max_wr, qattr->max_srq_wr); return -EINVAL; } if (attrs->attr.max_sge > qattr->max_sge) { DP_ERR(dev, "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n", attrs->attr.max_sge, qattr->max_sge); } if (!udata && attrs->srq_type == IB_SRQT_XRC) { DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n"); return -EINVAL; } return 0; } static void qedr_free_srq_user_params(struct qedr_srq *srq) { qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl); ib_umem_release(srq->usrq.umem); ib_umem_release(srq->prod_umem); } static void qedr_free_srq_kernel_params(struct qedr_srq *srq) { struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq; struct qedr_dev *dev = srq->dev; dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl); dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers), hw_srq->virt_prod_pair_addr, hw_srq->phy_prod_pair_addr); } static int qedr_init_srq_user_params(struct ib_udata *udata, struct qedr_srq *srq, struct qedr_create_srq_ureq *ureq, int access) { struct scatterlist *sg; int rc; rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr, ureq->srq_len, false, access, 1); if (rc) return rc; srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr, sizeof(struct rdma_srq_producers), access); if (IS_ERR(srq->prod_umem)) { qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl); ib_umem_release(srq->usrq.umem); DP_ERR(srq->dev, "create srq: failed ib_umem_get for producer, got %ld\n", PTR_ERR(srq->prod_umem)); return PTR_ERR(srq->prod_umem); } sg = srq->prod_umem->sgt_append.sgt.sgl; srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg); return 0; } static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq, struct qedr_dev *dev, struct ib_srq_init_attr *init_attr) { struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq; struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, .cnt_type = QED_CHAIN_CNT_TYPE_U32, .elem_size = QEDR_SRQ_WQE_ELEM_SIZE, }; dma_addr_t phy_prod_pair_addr; u32 num_elems; void *va; int rc; va = dma_alloc_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers), &phy_prod_pair_addr, GFP_KERNEL); if (!va) { DP_ERR(dev, "create srq: failed to allocate dma memory for producer\n"); return -ENOMEM; } hw_srq->phy_prod_pair_addr = phy_prod_pair_addr; hw_srq->virt_prod_pair_addr = va; num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE; params.num_elems = num_elems; rc 
= dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params); if (rc) goto err0; hw_srq->num_elems = num_elems; return 0; err0: dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers), va, phy_prod_pair_addr); return rc; } int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, struct ib_udata *udata) { struct qed_rdma_destroy_srq_in_params destroy_in_params; struct qed_rdma_create_srq_in_params in_params = {}; struct qedr_dev *dev = get_qedr_dev(ibsrq->device); struct qed_rdma_create_srq_out_params out_params; struct qedr_pd *pd = get_qedr_pd(ibsrq->pd); struct qedr_create_srq_ureq ureq = {}; u64 pbl_base_addr, phy_prod_pair_addr; struct qedr_srq_hwq_info *hw_srq; u32 page_cnt, page_size; struct qedr_srq *srq = get_qedr_srq(ibsrq); int rc = 0; DP_DEBUG(dev, QEDR_MSG_QP, "create SRQ called from %s (pd %p)\n", (udata) ? "User lib" : "kernel", pd); if (init_attr->srq_type != IB_SRQT_BASIC && init_attr->srq_type != IB_SRQT_XRC) return -EOPNOTSUPP; rc = qedr_check_srq_params(dev, init_attr, udata); if (rc) return -EINVAL; srq->dev = dev; srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC); hw_srq = &srq->hw_srq; spin_lock_init(&srq->lock); hw_srq->max_wr = init_attr->attr.max_wr; hw_srq->max_sges = init_attr->attr.max_sge; if (udata) { if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen))) { DP_ERR(dev, "create srq: problem copying data from user space\n"); goto err0; } rc = qedr_init_srq_user_params(udata, srq, &ureq, 0); if (rc) goto err0; page_cnt = srq->usrq.pbl_info.num_pbes; pbl_base_addr = srq->usrq.pbl_tbl->pa; phy_prod_pair_addr = hw_srq->phy_prod_pair_addr; page_size = PAGE_SIZE; } else { struct qed_chain *pbl; rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr); if (rc) goto err0; pbl = &hw_srq->pbl; page_cnt = qed_chain_get_page_cnt(pbl); pbl_base_addr = qed_chain_get_pbl_phys(pbl); phy_prod_pair_addr = hw_srq->phy_prod_pair_addr; page_size = QED_CHAIN_PAGE_SIZE; } in_params.pd_id = pd->pd_id; in_params.pbl_base_addr = pbl_base_addr; in_params.prod_pair_addr = phy_prod_pair_addr; in_params.num_pages = page_cnt; in_params.page_size = page_size; if (srq->is_xrc) { struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd); struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq); in_params.is_xrc = 1; in_params.xrcd_id = xrcd->xrcd_id; in_params.cq_cid = cq->icid; } rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params); if (rc) goto err1; srq->srq_id = out_params.srq_id; if (udata) { rc = qedr_copy_srq_uresp(dev, srq, udata); if (rc) goto err2; } rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL); if (rc) goto err2; DP_DEBUG(dev, QEDR_MSG_SRQ, "create srq: created srq with srq_id=0x%0x\n", srq->srq_id); return 0; err2: destroy_in_params.srq_id = srq->srq_id; dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params); err1: if (udata) qedr_free_srq_user_params(srq); else qedr_free_srq_kernel_params(srq); err0: return -EFAULT; } int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) { struct qed_rdma_destroy_srq_in_params in_params = {}; struct qedr_dev *dev = get_qedr_dev(ibsrq->device); struct qedr_srq *srq = get_qedr_srq(ibsrq); xa_erase_irq(&dev->srqs, srq->srq_id); in_params.srq_id = srq->srq_id; in_params.is_xrc = srq->is_xrc; dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params); if (ibsrq->uobject) qedr_free_srq_user_params(srq); else qedr_free_srq_kernel_params(srq); DP_DEBUG(dev, QEDR_MSG_SRQ, "destroy srq: destroyed srq with srq_id=0x%0x\n", srq->srq_id); return 0; } int 
qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) { struct qed_rdma_modify_srq_in_params in_params = {}; struct qedr_dev *dev = get_qedr_dev(ibsrq->device); struct qedr_srq *srq = get_qedr_srq(ibsrq); int rc; if (attr_mask & IB_SRQ_MAX_WR) { DP_ERR(dev, "modify srq: invalid attribute mask=0x%x specified for %p\n", attr_mask, srq); return -EINVAL; } if (attr_mask & IB_SRQ_LIMIT) { if (attr->srq_limit >= srq->hw_srq.max_wr) { DP_ERR(dev, "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n", attr->srq_limit, srq->hw_srq.max_wr); return -EINVAL; } in_params.srq_id = srq->srq_id; in_params.wqe_limit = attr->srq_limit; rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params); if (rc) return rc; } srq->srq_limit = attr->srq_limit; DP_DEBUG(dev, QEDR_MSG_SRQ, "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id); return 0; } static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type) { switch (ib_qp_type) { case IB_QPT_RC: return QED_RDMA_QP_TYPE_RC; case IB_QPT_XRC_INI: return QED_RDMA_QP_TYPE_XRC_INI; case IB_QPT_XRC_TGT: return QED_RDMA_QP_TYPE_XRC_TGT; default: return QED_RDMA_QP_TYPE_INVAL; } } static inline void qedr_init_common_qp_in_params(struct qedr_dev *dev, struct qedr_pd *pd, struct qedr_qp *qp, struct ib_qp_init_attr *attrs, bool fmr_and_reserved_lkey, struct qed_rdma_create_qp_in_params *params) { /* QP handle to be written in an async event */ params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp); params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp); params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR); params->fmr_and_reserved_lkey = fmr_and_reserved_lkey; params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type); params->stats_queue = 0; if (pd) { params->pd = pd->pd_id; params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi; } if (qedr_qp_has_sq(qp)) params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid; if (qedr_qp_has_rq(qp)) params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid; if (qedr_qp_has_srq(qp)) { params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid; params->srq_id = qp->srq->srq_id; params->use_srq = true; } else { params->srq_id = 0; params->use_srq = false; } } static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp) { DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. " "qp=%p. " "sq_addr=0x%llx, " "sq_len=%zd, " "rq_addr=0x%llx, " "rq_len=%zd" "\n", qp, qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0, qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0, qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0, qedr_qp_has_sq(qp) ? 
qp->urq.buf_len : 0); } static inline void qedr_iwarp_populate_user_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct qed_rdma_create_qp_out_params *out_params) { qp->usq.pbl_tbl->va = out_params->sq_pbl_virt; qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys; qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl, &qp->usq.pbl_info, FW_PAGE_SHIFT); if (!qp->srq) { qp->urq.pbl_tbl->va = out_params->rq_pbl_virt; qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys; } qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl, &qp->urq.pbl_info, FW_PAGE_SHIFT); } static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_ucontext *ctx, struct qedr_qp *qp) { if (qedr_qp_has_sq(qp)) { ib_umem_release(qp->usq.umem); qp->usq.umem = NULL; } if (qedr_qp_has_rq(qp)) { ib_umem_release(qp->urq.umem); qp->urq.umem = NULL; } if (rdma_protocol_roce(&dev->ibdev, 1)) { qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl); qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl); } else { kfree(qp->usq.pbl_tbl); kfree(qp->urq.pbl_tbl); } if (qp->usq.db_rec_data) { qedr_db_recovery_del(dev, qp->usq.db_addr, &qp->usq.db_rec_data->db_data); rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry); } if (qp->urq.db_rec_data) { qedr_db_recovery_del(dev, qp->urq.db_addr, &qp->urq.db_rec_data->db_data); rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry); } if (rdma_protocol_iwarp(&dev->ibdev, 1)) qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr, &qp->urq.db_rec_db2_data); } static int qedr_create_user_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct ib_pd *ibpd, struct ib_udata *udata, struct ib_qp_init_attr *attrs) { struct qed_rdma_create_qp_in_params in_params; struct qed_rdma_create_qp_out_params out_params; struct qedr_create_qp_uresp uresp = {}; struct qedr_create_qp_ureq ureq = {}; int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1); struct qedr_ucontext *ctx = NULL; struct qedr_pd *pd = NULL; int rc = 0; qp->create_type = QEDR_QP_CREATE_USER; if (ibpd) { pd = get_qedr_pd(ibpd); ctx = pd->uctx; } if (udata) { rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen)); if (rc) { DP_ERR(dev, "Problem copying data from user space\n"); return rc; } } if (qedr_qp_has_sq(qp)) { /* SQ - read access only (0) */ rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr, ureq.sq_len, true, 0, alloc_and_init); if (rc) return rc; } if (qedr_qp_has_rq(qp)) { /* RQ - read access only (0) */ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr, ureq.rq_len, true, 0, alloc_and_init); if (rc) return rc; } memset(&in_params, 0, sizeof(in_params)); qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params); in_params.qp_handle_lo = ureq.qp_handle_lo; in_params.qp_handle_hi = ureq.qp_handle_hi; if (qp->qp_type == IB_QPT_XRC_TGT) { struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd); in_params.xrcd_id = xrcd->xrcd_id; in_params.qp_handle_lo = qp->qp_id; in_params.use_srq = 1; } if (qedr_qp_has_sq(qp)) { in_params.sq_num_pages = qp->usq.pbl_info.num_pbes; in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa; } if (qedr_qp_has_rq(qp)) { in_params.rq_num_pages = qp->urq.pbl_info.num_pbes; in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa; } if (ctx) SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode); qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, &in_params, &out_params); if (!qp->qed_qp) { rc = -ENOMEM; goto err1; } if (rdma_protocol_iwarp(&dev->ibdev, 1)) qedr_iwarp_populate_user_qp(dev, qp, &out_params); qp->qp_id = out_params.qp_id; qp->icid = out_params.icid; if (udata) { rc = 
qedr_copy_qp_uresp(dev, qp, udata, &uresp); if (rc) goto err; } /* db offset was calculated in copy_qp_uresp, now set in the user q */ if (qedr_qp_has_sq(qp)) { qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; qp->sq.max_wr = attrs->cap.max_send_wr; rc = qedr_db_recovery_add(dev, qp->usq.db_addr, &qp->usq.db_rec_data->db_data, DB_REC_WIDTH_32B, DB_REC_USER); if (rc) goto err; } if (qedr_qp_has_rq(qp)) { qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset; qp->rq.max_wr = attrs->cap.max_recv_wr; rc = qedr_db_recovery_add(dev, qp->urq.db_addr, &qp->urq.db_rec_data->db_data, DB_REC_WIDTH_32B, DB_REC_USER); if (rc) goto err; } if (rdma_protocol_iwarp(&dev->ibdev, 1)) { qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset; /* calculate the db_rec_db2 data since it is constant so no * need to reflect from user */ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid); qp->urq.db_rec_db2_data.data.value = cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD); rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr, &qp->urq.db_rec_db2_data, DB_REC_WIDTH_32B, DB_REC_USER); if (rc) goto err; } qedr_qp_user_print(dev, qp); return rc; err: rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); if (rc) DP_ERR(dev, "create qp: fatal fault. rc=%d", rc); err1: qedr_cleanup_user(dev, ctx, qp); return rc; } static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp) { int rc; qp->sq.db = dev->db_addr + DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); qp->sq.db_data.data.icid = qp->icid; rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data, DB_REC_WIDTH_32B, DB_REC_KERNEL); if (rc) return rc; qp->rq.db = dev->db_addr + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD); qp->rq.db_data.data.icid = qp->icid; qp->rq.iwarp_db2 = dev->db_addr + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS); qp->rq.iwarp_db2_data.data.icid = qp->icid; qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD; rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data, DB_REC_WIDTH_32B, DB_REC_KERNEL); if (rc) return rc; rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2, &qp->rq.iwarp_db2_data, DB_REC_WIDTH_32B, DB_REC_KERNEL); return rc; } static int qedr_roce_create_kernel_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct qed_rdma_create_qp_in_params *in_params, u32 n_sq_elems, u32 n_rq_elems) { struct qed_rdma_create_qp_out_params out_params; struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .cnt_type = QED_CHAIN_CNT_TYPE_U32, }; int rc; params.intended_use = QED_CHAIN_USE_TO_PRODUCE; params.num_elems = n_sq_elems; params.elem_size = QEDR_SQE_ELEMENT_SIZE; rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params); if (rc) return rc; in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl); in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl); params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; params.num_elems = n_rq_elems; params.elem_size = QEDR_RQE_ELEMENT_SIZE; rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params); if (rc) return rc; in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl); in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl); qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, in_params, &out_params); if (!qp->qed_qp) return -EINVAL; qp->qp_id = out_params.qp_id; qp->icid = out_params.icid; return qedr_set_roce_db_info(dev, qp); } static int qedr_iwarp_create_kernel_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct qed_rdma_create_qp_in_params *in_params, u32 n_sq_elems, u32 n_rq_elems) { struct 
qed_rdma_create_qp_out_params out_params; struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .cnt_type = QED_CHAIN_CNT_TYPE_U32, }; int rc; in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems, QEDR_SQE_ELEMENT_SIZE, QED_CHAIN_PAGE_SIZE, QED_CHAIN_MODE_PBL); in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems, QEDR_RQE_ELEMENT_SIZE, QED_CHAIN_PAGE_SIZE, QED_CHAIN_MODE_PBL); qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, in_params, &out_params); if (!qp->qed_qp) return -EINVAL; /* Now we allocate the chain */ params.intended_use = QED_CHAIN_USE_TO_PRODUCE; params.num_elems = n_sq_elems; params.elem_size = QEDR_SQE_ELEMENT_SIZE; params.ext_pbl_virt = out_params.sq_pbl_virt; params.ext_pbl_phys = out_params.sq_pbl_phys; rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params); if (rc) goto err; params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; params.num_elems = n_rq_elems; params.elem_size = QEDR_RQE_ELEMENT_SIZE; params.ext_pbl_virt = out_params.rq_pbl_virt; params.ext_pbl_phys = out_params.rq_pbl_phys; rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params); if (rc) goto err; qp->qp_id = out_params.qp_id; qp->icid = out_params.icid; return qedr_set_iwarp_db_info(dev, qp); err: dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); return rc; } static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp) { dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); kfree(qp->wqe_wr_id); dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl); kfree(qp->rqe_wr_id); /* GSI qp is not registered to db mechanism so no need to delete */ if (qp->qp_type == IB_QPT_GSI) return; qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data); if (!qp->srq) { qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data); if (rdma_protocol_iwarp(&dev->ibdev, 1)) qedr_db_recovery_del(dev, qp->rq.iwarp_db2, &qp->rq.iwarp_db2_data); } } static int qedr_create_kernel_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct ib_pd *ibpd, struct ib_qp_init_attr *attrs) { struct qed_rdma_create_qp_in_params in_params; struct qedr_pd *pd = get_qedr_pd(ibpd); int rc = -EINVAL; u32 n_rq_elems; u32 n_sq_elems; u32 n_sq_entries; memset(&in_params, 0, sizeof(in_params)); qp->create_type = QEDR_QP_CREATE_KERNEL; /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in * the ring. The ring should allow at least a single WR, even if the * user requested none, due to allocation issues. * We should add an extra WR since the prod and cons indices of * wqe_wr_id are managed in such a way that the WQ is considered full * when (prod+1)%max_wr==cons. We currently don't do that because we * double the number of entries due to an iSER issue that pushes far more * WRs than indicated. If we decline its ib_post_send() then we get * error prints in the dmesg we'd like to avoid. */ qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier, dev->attr.max_sqe); qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id), GFP_KERNEL); if (!qp->wqe_wr_id) { DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n"); return -ENOMEM; } /* QP handle to be written in CQE */ in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp); in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp); /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in * the ring. The ring should allow at least a single WR, even if the * user requested none, due to allocation issues.
*/ qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1); /* Allocate driver internal RQ array */ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id), GFP_KERNEL); if (!qp->rqe_wr_id) { DP_ERR(dev, "create qp: failed RQ shadow memory allocation\n"); kfree(qp->wqe_wr_id); return -ENOMEM; } qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params); n_sq_entries = attrs->cap.max_send_wr; n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe); n_sq_entries = max_t(u32, n_sq_entries, 1); n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE; n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE; if (rdma_protocol_iwarp(&dev->ibdev, 1)) rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params, n_sq_elems, n_rq_elems); else rc = qedr_roce_create_kernel_qp(dev, qp, &in_params, n_sq_elems, n_rq_elems); if (rc) qedr_cleanup_kernel(dev, qp); return rc; } static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp, struct ib_udata *udata) { struct qedr_ucontext *ctx = rdma_udata_to_drv_context(udata, struct qedr_ucontext, ibucontext); int rc; if (qp->qp_type != IB_QPT_GSI) { rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); if (rc) return rc; } if (qp->create_type == QEDR_QP_CREATE_USER) qedr_cleanup_user(dev, ctx, qp); else qedr_cleanup_kernel(dev, qp); return 0; } int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, struct ib_udata *udata) { struct qedr_xrcd *xrcd = NULL; struct ib_pd *ibpd = ibqp->pd; struct qedr_pd *pd = get_qedr_pd(ibpd); struct qedr_dev *dev = get_qedr_dev(ibqp->device); struct qedr_qp *qp = get_qedr_qp(ibqp); int rc = 0; if (attrs->create_flags) return -EOPNOTSUPP; if (attrs->qp_type == IB_QPT_XRC_TGT) xrcd = get_qedr_xrcd(attrs->xrcd); else pd = get_qedr_pd(ibpd); DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", udata ? "user library" : "kernel", pd); rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata); if (rc) return rc; DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n", udata ? "user library" : "kernel", attrs->event_handler, pd, get_qedr_cq(attrs->send_cq), get_qedr_cq(attrs->send_cq)->icid, get_qedr_cq(attrs->recv_cq), attrs->recv_cq ? 
get_qedr_cq(attrs->recv_cq)->icid : 0); qedr_set_common_qp_params(dev, qp, pd, attrs); if (attrs->qp_type == IB_QPT_GSI) return qedr_create_gsi_qp(dev, attrs, qp); if (udata || xrcd) rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs); else rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs); if (rc) return rc; qp->ibqp.qp_num = qp->qp_id; if (rdma_protocol_iwarp(&dev->ibdev, 1)) { rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL); if (rc) goto out_free_qp_resources; } return 0; out_free_qp_resources: qedr_free_qp_resources(dev, qp, udata); return -EFAULT; } static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) { switch (qp_state) { case QED_ROCE_QP_STATE_RESET: return IB_QPS_RESET; case QED_ROCE_QP_STATE_INIT: return IB_QPS_INIT; case QED_ROCE_QP_STATE_RTR: return IB_QPS_RTR; case QED_ROCE_QP_STATE_RTS: return IB_QPS_RTS; case QED_ROCE_QP_STATE_SQD: return IB_QPS_SQD; case QED_ROCE_QP_STATE_ERR: return IB_QPS_ERR; case QED_ROCE_QP_STATE_SQE: return IB_QPS_SQE; } return IB_QPS_ERR; } static enum qed_roce_qp_state qedr_get_state_from_ibqp( enum ib_qp_state qp_state) { switch (qp_state) { case IB_QPS_RESET: return QED_ROCE_QP_STATE_RESET; case IB_QPS_INIT: return QED_ROCE_QP_STATE_INIT; case IB_QPS_RTR: return QED_ROCE_QP_STATE_RTR; case IB_QPS_RTS: return QED_ROCE_QP_STATE_RTS; case IB_QPS_SQD: return QED_ROCE_QP_STATE_SQD; case IB_QPS_ERR: return QED_ROCE_QP_STATE_ERR; default: return QED_ROCE_QP_STATE_ERR; } } static int qedr_update_qp_state(struct qedr_dev *dev, struct qedr_qp *qp, enum qed_roce_qp_state cur_state, enum qed_roce_qp_state new_state) { int status = 0; if (new_state == cur_state) return 0; switch (cur_state) { case QED_ROCE_QP_STATE_RESET: switch (new_state) { case QED_ROCE_QP_STATE_INIT: break; default: status = -EINVAL; break; } break; case QED_ROCE_QP_STATE_INIT: switch (new_state) { case QED_ROCE_QP_STATE_RTR: /* Update doorbell (in case post_recv was * done before move to RTR) */ if (rdma_protocol_roce(&dev->ibdev, 1)) { writel(qp->rq.db_data.raw, qp->rq.db); } break; case QED_ROCE_QP_STATE_ERR: break; default: /* Invalid state change. */ status = -EINVAL; break; } break; case QED_ROCE_QP_STATE_RTR: /* RTR->XXX */ switch (new_state) { case QED_ROCE_QP_STATE_RTS: break; case QED_ROCE_QP_STATE_ERR: break; default: /* Invalid state change. */ status = -EINVAL; break; } break; case QED_ROCE_QP_STATE_RTS: /* RTS->XXX */ switch (new_state) { case QED_ROCE_QP_STATE_SQD: break; case QED_ROCE_QP_STATE_ERR: break; default: /* Invalid state change. */ status = -EINVAL; break; } break; case QED_ROCE_QP_STATE_SQD: /* SQD->XXX */ switch (new_state) { case QED_ROCE_QP_STATE_RTS: case QED_ROCE_QP_STATE_ERR: break; default: /* Invalid state change. 
*/ status = -EINVAL; break; } break; case QED_ROCE_QP_STATE_ERR: /* ERR->XXX */ switch (new_state) { case QED_ROCE_QP_STATE_RESET: if ((qp->rq.prod != qp->rq.cons) || (qp->sq.prod != qp->sq.cons)) { DP_NOTICE(dev, "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n", qp->rq.prod, qp->rq.cons, qp->sq.prod, qp->sq.cons); status = -EINVAL; } break; default: status = -EINVAL; break; } break; default: status = -EINVAL; break; } return status; } int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct qedr_qp *qp = get_qedr_qp(ibqp); struct qed_rdma_modify_qp_in_params qp_params = { 0 }; struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); enum ib_qp_state old_qp_state, new_qp_state; enum qed_roce_qp_state cur_state; int rc = 0; DP_DEBUG(dev, QEDR_MSG_QP, "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask, attr->qp_state); if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; old_qp_state = qedr_get_ibqp_state(qp->state); if (attr_mask & IB_QP_STATE) new_qp_state = attr->qp_state; else new_qp_state = old_qp_state; if (rdma_protocol_roce(&dev->ibdev, 1)) { if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state, ibqp->qp_type, attr_mask)) { DP_ERR(dev, "modify qp: invalid attribute mask=0x%x specified for\n" "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n", attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state, new_qp_state); rc = -EINVAL; goto err; } } /* Translate the masks... */ if (attr_mask & IB_QP_STATE) { SET_FIELD(qp_params.modify_flags, QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1); qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state); } if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) qp_params.sqd_async = true; if (attr_mask & IB_QP_PKEY_INDEX) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY, 1); if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) { rc = -EINVAL; goto err; } qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT; } if (attr_mask & IB_QP_QKEY) qp->qkey = attr->qkey; if (attr_mask & IB_QP_ACCESS_FLAGS) { SET_FIELD(qp_params.modify_flags, QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1); qp_params.incoming_rdma_read_en = attr->qp_access_flags & IB_ACCESS_REMOTE_READ; qp_params.incoming_rdma_write_en = attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE; qp_params.incoming_atomic_en = attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC; } if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) { if (rdma_protocol_iwarp(&dev->ibdev, 1)) return -EINVAL; if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n"); rc = -EINVAL; goto err; } qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu), ib_mtu_enum_to_int(iboe_get_mtu (dev->ndev->mtu))); } if (!qp->mtu) { qp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)); pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu); } SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1); qp_params.traffic_class_tos = grh->traffic_class; qp_params.flow_label = grh->flow_label; qp_params.hop_limit_ttl = grh->hop_limit; qp->sgid_idx = grh->sgid_index; rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params); if (rc) { DP_ERR(dev, "modify qp: problems with GID index %d (rc=%d)\n", grh->sgid_index, rc); return rc; } rc = qedr_get_dmac(dev, &attr->ah_attr, qp_params.remote_mac_addr); if (rc) return rc; qp_params.use_local_mac = true; 
ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr); DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n", qp_params.dgid.dwords[0], qp_params.dgid.dwords[1], qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]); DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n", qp_params.sgid.dwords[0], qp_params.sgid.dwords[1], qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", qp_params.remote_mac_addr); qp_params.mtu = qp->mtu; qp_params.lb_indication = false; } if (!qp_params.mtu) { /* Stay with current MTU */ if (qp->mtu) qp_params.mtu = qp->mtu; else qp_params.mtu = ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)); } if (attr_mask & IB_QP_TIMEOUT) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1); /* The received timeout value is an exponent used like this: * "12.7.34 LOCAL ACK TIMEOUT * Value representing the transport (ACK) timeout for use by * the remote, expressed as: 4.096 * 2^timeout [usec]" * The FW expects timeout in msec so we need to divide the usec * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2, * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8). * The value of zero means infinite so we use a 'max_t' to make * sure that sub 1 msec values will be configured as 1 msec. */ if (attr->timeout) qp_params.ack_timeout = 1 << max_t(int, attr->timeout - 8, 0); else qp_params.ack_timeout = 0; qp->timeout = attr->timeout; } if (attr_mask & IB_QP_RETRY_CNT) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); qp_params.retry_cnt = attr->retry_cnt; } if (attr_mask & IB_QP_RNR_RETRY) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1); qp_params.rnr_retry_cnt = attr->rnr_retry; } if (attr_mask & IB_QP_RQ_PSN) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1); qp_params.rq_psn = attr->rq_psn; qp->rq_psn = attr->rq_psn; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) { rc = -EINVAL; DP_ERR(dev, "unsupported max_rd_atomic=%d, supported=%d\n", attr->max_rd_atomic, dev->attr.max_qp_req_rd_atomic_resc); goto err; } SET_FIELD(qp_params.modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1); qp_params.max_rd_atomic_req = attr->max_rd_atomic; } if (attr_mask & IB_QP_MIN_RNR_TIMER) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1); qp_params.min_rnr_nak_timer = attr->min_rnr_timer; } if (attr_mask & IB_QP_SQ_PSN) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1); qp_params.sq_psn = attr->sq_psn; qp->sq_psn = attr->sq_psn; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (attr->max_dest_rd_atomic > dev->attr.max_qp_resp_rd_atomic_resc) { DP_ERR(dev, "unsupported max_dest_rd_atomic=%d, supported=%d\n", attr->max_dest_rd_atomic, dev->attr.max_qp_resp_rd_atomic_resc); rc = -EINVAL; goto err; } SET_FIELD(qp_params.modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1); qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic; } if (attr_mask & IB_QP_DEST_QPN) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1); qp_params.dest_qp = attr->dest_qp_num; qp->dest_qp_num = attr->dest_qp_num; } cur_state = qp->state; /* Update the QP state before the actual ramrod to prevent a race with * fast path. Modifying the QP state to error will cause the device to * flush the CQEs and while polling the flushed CQEs will considered as * a potential issue if the QP isn't in error state. 
*/ if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI && !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR) qp->state = QED_ROCE_QP_STATE_ERR; if (qp->qp_type != IB_QPT_GSI) rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params); if (attr_mask & IB_QP_STATE) { if ((qp->qp_type != IB_QPT_GSI) && (!udata)) rc = qedr_update_qp_state(dev, qp, cur_state, qp_params.new_state); qp->state = qp_params.new_state; } err: return rc; } static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params) { int ib_qp_acc_flags = 0; if (params->incoming_rdma_write_en) ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE; if (params->incoming_rdma_read_en) ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ; if (params->incoming_atomic_en) ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC; ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE; return ib_qp_acc_flags; } int qedr_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct qed_rdma_query_qp_out_params params; struct qedr_qp *qp = get_qedr_qp(ibqp); struct qedr_dev *dev = qp->dev; int rc = 0; memset(&params, 0, sizeof(params)); memset(qp_attr, 0, sizeof(*qp_attr)); memset(qp_init_attr, 0, sizeof(*qp_init_attr)); if (qp->qp_type != IB_QPT_GSI) { rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params); if (rc) goto err; qp_attr->qp_state = qedr_get_ibqp_state(params.state); } else { qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS); } qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu); qp_attr->path_mig_state = IB_MIG_MIGRATED; qp_attr->rq_psn = params.rq_psn; qp_attr->sq_psn = params.sq_psn; qp_attr->dest_qp_num = params.dest_qp; qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params); qp_attr->cap.max_send_wr = qp->sq.max_wr; qp_attr->cap.max_recv_wr = qp->rq.max_wr; qp_attr->cap.max_send_sge = qp->sq.max_sges; qp_attr->cap.max_recv_sge = qp->rq.max_sges; qp_attr->cap.max_inline_data = dev->attr.max_inline; qp_init_attr->cap = qp_attr->cap; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; rdma_ah_set_grh(&qp_attr->ah_attr, NULL, params.flow_label, qp->sgid_idx, params.hop_limit_ttl, params.traffic_class_tos); rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]); rdma_ah_set_port_num(&qp_attr->ah_attr, 1); rdma_ah_set_sl(&qp_attr->ah_attr, 0); qp_attr->timeout = qp->timeout; qp_attr->rnr_retry = params.rnr_retry; qp_attr->retry_cnt = params.retry_cnt; qp_attr->min_rnr_timer = params.min_rnr_nak_timer; qp_attr->pkey_index = params.pkey_index; qp_attr->port_num = 1; rdma_ah_set_path_bits(&qp_attr->ah_attr, 0); rdma_ah_set_static_rate(&qp_attr->ah_attr, 0); qp_attr->alt_pkey_index = 0; qp_attr->alt_port_num = 0; qp_attr->alt_timeout = 0; memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0; qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic; qp_attr->max_rd_atomic = params.max_rd_atomic; qp_attr->en_sqd_async_notify = (params.sqd_async) ? 
1 : 0; DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n", qp_attr->cap.max_inline_data); err: return rc; } int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct qedr_qp *qp = get_qedr_qp(ibqp); struct qedr_dev *dev = qp->dev; struct ib_qp_attr attr; int attr_mask = 0; DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n", qp, qp->qp_type); if (rdma_protocol_roce(&dev->ibdev, 1)) { if ((qp->state != QED_ROCE_QP_STATE_RESET) && (qp->state != QED_ROCE_QP_STATE_ERR) && (qp->state != QED_ROCE_QP_STATE_INIT)) { attr.qp_state = IB_QPS_ERR; attr_mask |= IB_QP_STATE; /* Change the QP state to ERROR */ qedr_modify_qp(ibqp, &attr, attr_mask, NULL); } } else { /* If connection establishment started the WAIT_FOR_CONNECT * bit will be on and we need to Wait for the establishment * to complete before destroying the qp. */ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, &qp->iwarp_cm_flags)) wait_for_completion(&qp->iwarp_cm_comp); /* If graceful disconnect started, the WAIT_FOR_DISCONNECT * bit will be on, and we need to wait for the disconnect to * complete before continuing. We can use the same completion, * iwarp_cm_comp, since this is the only place that waits for * this completion and it is sequential. In addition, * disconnect can't occur before the connection is fully * established, therefore if WAIT_FOR_DISCONNECT is on it * means WAIT_FOR_CONNECT is also on and the completion for * CONNECT already occurred. */ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT, &qp->iwarp_cm_flags)) wait_for_completion(&qp->iwarp_cm_comp); } if (qp->qp_type == IB_QPT_GSI) qedr_destroy_gsi_qp(dev); /* We need to remove the entry from the xarray before we release the * qp_id to avoid a race of the qp_id being reallocated and failing * on xa_insert */ if (rdma_protocol_iwarp(&dev->ibdev, 1)) xa_erase(&dev->qps, qp->qp_id); qedr_free_qp_resources(dev, qp, udata); if (rdma_protocol_iwarp(&dev->ibdev, 1)) { qedr_iw_qp_rem_ref(&qp->ibqp); wait_for_completion(&qp->qp_rel_comp); } return 0; } int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) { struct qedr_ah *ah = get_qedr_ah(ibah); rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr); return 0; } int qedr_destroy_ah(struct ib_ah *ibah, u32 flags) { struct qedr_ah *ah = get_qedr_ah(ibah); rdma_destroy_ah_attr(&ah->attr); return 0; } static void free_mr_info(struct qedr_dev *dev, struct mr_info *info) { struct qedr_pbl *pbl, *tmp; if (info->pbl_table) list_add_tail(&info->pbl_table->list_entry, &info->free_pbl_list); if (!list_empty(&info->inuse_pbl_list)) list_splice(&info->inuse_pbl_list, &info->free_pbl_list); list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) { list_del(&pbl->list_entry); qedr_free_pbl(dev, &info->pbl_info, pbl); } } static int init_mr_info(struct qedr_dev *dev, struct mr_info *info, size_t page_list_len, bool two_layered) { struct qedr_pbl *tmp; int rc; INIT_LIST_HEAD(&info->free_pbl_list); INIT_LIST_HEAD(&info->inuse_pbl_list); rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info, page_list_len, two_layered); if (rc) goto done; info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL); if (IS_ERR(info->pbl_table)) { rc = PTR_ERR(info->pbl_table); goto done; } DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n", &info->pbl_table->pa); /* in usual case we use 2 PBLs, so we add one to free * list and allocating another one */ tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL); if (IS_ERR(tmp)) { DP_DEBUG(dev, 
QEDR_MSG_MR, "Extra PBL is not allocated\n"); goto done; } list_add_tail(&tmp->list_entry, &info->free_pbl_list); DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa); done: if (rc) free_mr_info(dev, info); return rc; } struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, u64 usr_addr, int acc, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibpd->device); struct qedr_mr *mr; struct qedr_pd *pd; int rc = -ENOMEM; pd = get_qedr_pd(ibpd); DP_DEBUG(dev, QEDR_MSG_MR, "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n", pd->pd_id, start, len, usr_addr, acc); if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) return ERR_PTR(-EINVAL); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(rc); mr->type = QEDR_MR_USER; mr->umem = ib_umem_get(ibpd->device, start, len, acc); if (IS_ERR(mr->umem)) { rc = -EFAULT; goto err0; } rc = init_mr_info(dev, &mr->info, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1); if (rc) goto err1; qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, &mr->info.pbl_info, PAGE_SHIFT); rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); if (rc) { if (rc == -EINVAL) DP_ERR(dev, "Out of MR resources\n"); else DP_ERR(dev, "roce alloc tid returned error %d\n", rc); goto err1; } /* Index only, 18 bit long, lkey = itid << 8 | key */ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR; mr->hw_mr.key = 0; mr->hw_mr.pd = pd->pd_id; mr->hw_mr.local_read = 1; mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; mr->hw_mr.mw_bind = false; mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa; mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered; mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size); mr->hw_mr.page_size_log = PAGE_SHIFT; mr->hw_mr.length = len; mr->hw_mr.vaddr = usr_addr; mr->hw_mr.phy_mr = false; mr->hw_mr.dma_mr = false; rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr); if (rc) { DP_ERR(dev, "roce register tid returned an error %d\n", rc); goto err2; } mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; if (mr->hw_mr.remote_write || mr->hw_mr.remote_read || mr->hw_mr.remote_atomic) mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n", mr->ibmr.lkey); return &mr->ibmr; err2: dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); err1: qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table); err0: kfree(mr); return ERR_PTR(rc); } int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) { struct qedr_mr *mr = get_qedr_mr(ib_mr); struct qedr_dev *dev = get_qedr_dev(ib_mr->device); int rc = 0; rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid); if (rc) return rc; dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); if (mr->type != QEDR_MR_DMA) free_mr_info(dev, &mr->info); /* it could be user registered memory. 
*/ ib_umem_release(mr->umem); kfree(mr); return rc; } static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len) { struct qedr_pd *pd = get_qedr_pd(ibpd); struct qedr_dev *dev = get_qedr_dev(ibpd->device); struct qedr_mr *mr; int rc = -ENOMEM; DP_DEBUG(dev, QEDR_MSG_MR, "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id, max_page_list_len); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(rc); mr->dev = dev; mr->type = QEDR_MR_FRMR; rc = init_mr_info(dev, &mr->info, max_page_list_len, 1); if (rc) goto err0; rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); if (rc) { if (rc == -EINVAL) DP_ERR(dev, "Out of MR resources\n"); else DP_ERR(dev, "roce alloc tid returned error %d\n", rc); goto err1; } /* Index only, 18 bit long, lkey = itid << 8 | key */ mr->hw_mr.tid_type = QED_RDMA_TID_FMR; mr->hw_mr.key = 0; mr->hw_mr.pd = pd->pd_id; mr->hw_mr.local_read = 1; mr->hw_mr.local_write = 0; mr->hw_mr.remote_read = 0; mr->hw_mr.remote_write = 0; mr->hw_mr.remote_atomic = 0; mr->hw_mr.mw_bind = false; mr->hw_mr.pbl_ptr = 0; mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered; mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size); mr->hw_mr.length = 0; mr->hw_mr.vaddr = 0; mr->hw_mr.phy_mr = true; mr->hw_mr.dma_mr = false; rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr); if (rc) { DP_ERR(dev, "roce register tid returned an error %d\n", rc); goto err2; } mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; mr->ibmr.rkey = mr->ibmr.lkey; DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey); return mr; err2: dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); err1: qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table); err0: kfree(mr); return ERR_PTR(rc); } struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, u32 max_num_sg) { struct qedr_mr *mr; if (mr_type != IB_MR_TYPE_MEM_REG) return ERR_PTR(-EINVAL); mr = __qedr_alloc_mr(ibpd, max_num_sg); if (IS_ERR(mr)) return ERR_PTR(-EINVAL); return &mr->ibmr; } static int qedr_set_page(struct ib_mr *ibmr, u64 addr) { struct qedr_mr *mr = get_qedr_mr(ibmr); struct qedr_pbl *pbl_table; struct regpair *pbe; u32 pbes_in_page; if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) { DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages); return -ENOMEM; } DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n", mr->npages, addr); pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64); pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page); pbe = (struct regpair *)pbl_table->va; pbe += mr->npages % pbes_in_page; pbe->lo = cpu_to_le32((u32)addr); pbe->hi = cpu_to_le32((u32)upper_32_bits(addr)); mr->npages++; return 0; } static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info) { int work = info->completed - info->completed_handled - 1; DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work); while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) { struct qedr_pbl *pbl; /* Free all the page list that are possible to be freed * (all the ones that were invalidated), under the assumption * that if an FMR was completed successfully that means that * if there was an invalidate operation before it also ended */ pbl = list_first_entry(&info->inuse_pbl_list, struct qedr_pbl, list_entry); list_move_tail(&pbl->list_entry, &info->free_pbl_list); info->completed_handled++; } } int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct qedr_mr *mr = 
get_qedr_mr(ibmr); mr->npages = 0; handle_completed_mrs(mr->dev, &mr->info); return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page); } struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc) { struct qedr_dev *dev = get_qedr_dev(ibpd->device); struct qedr_pd *pd = get_qedr_pd(ibpd); struct qedr_mr *mr; int rc; mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->type = QEDR_MR_DMA; rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); if (rc) { if (rc == -EINVAL) DP_ERR(dev, "Out of MR resources\n"); else DP_ERR(dev, "roce alloc tid returned error %d\n", rc); goto err1; } /* index only, 18 bit long, lkey = itid << 8 | key */ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR; mr->hw_mr.pd = pd->pd_id; mr->hw_mr.local_read = 1; mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; mr->hw_mr.dma_mr = true; rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr); if (rc) { DP_ERR(dev, "roce register tid returned an error %d\n", rc); goto err2; } mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; if (mr->hw_mr.remote_write || mr->hw_mr.remote_read || mr->hw_mr.remote_atomic) mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey); return &mr->ibmr; err2: dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); err1: kfree(mr); return ERR_PTR(rc); } static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq) { return (((wq->prod + 1) % wq->max_wr) == wq->cons); } static int sge_data_len(struct ib_sge *sg_list, int num_sge) { int i, len = 0; for (i = 0; i < num_sge; i++) len += sg_list[i].length; return len; } static void swap_wqe_data64(u64 *p) { int i; for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++) *p = cpu_to_be64(cpu_to_le64(*p)); } static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev, struct qedr_qp *qp, u8 *wqe_size, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr, u8 *bits, u8 bit) { u32 data_size = sge_data_len(wr->sg_list, wr->num_sge); char *seg_prt, *wqe; int i, seg_siz; if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) { DP_ERR(dev, "Too much inline data in WR: %d\n", data_size); *bad_wr = wr; return 0; } if (!data_size) return data_size; *bits |= bit; seg_prt = NULL; wqe = NULL; seg_siz = 0; /* Copy data inline */ for (i = 0; i < wr->num_sge; i++) { u32 len = wr->sg_list[i].length; void *src = (void *)(uintptr_t)wr->sg_list[i].addr; while (len > 0) { u32 cur; /* New segment required */ if (!seg_siz) { wqe = (char *)qed_chain_produce(&qp->sq.pbl); seg_prt = wqe; seg_siz = sizeof(struct rdma_sq_common_wqe); (*wqe_size)++; } /* Calculate currently allowed length */ cur = min_t(u32, len, seg_siz); memcpy(seg_prt, src, cur); /* Update segment variables */ seg_prt += cur; seg_siz -= cur; /* Update sge variables */ src += cur; len -= cur; /* Swap fully-completed segments */ if (!seg_siz) swap_wqe_data64((u64 *)wqe); } } /* swap last not completed segment */ if (seg_siz) swap_wqe_data64((u64 *)wqe); return data_size; } #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \ do { \ DMA_REGPAIR_LE(sge->addr, vaddr); \ (sge)->length = cpu_to_le32(vlength); \ (sge)->flags = cpu_to_le32(vflags); \ } while (0) #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \ do { \ DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \ (hdr)->num_sges = num_sge; \ } while (0) #define 
SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \ do { \ DMA_REGPAIR_LE(sge->addr, vaddr); \ (sge)->length = cpu_to_le32(vlength); \ (sge)->l_key = cpu_to_le32(vlkey); \ } while (0) static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size, const struct ib_send_wr *wr) { u32 data_size = 0; int i; for (i = 0; i < wr->num_sge; i++) { struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl); DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr); sge->l_key = cpu_to_le32(wr->sg_list[i].lkey); sge->length = cpu_to_le32(wr->sg_list[i].length); data_size += wr->sg_list[i].length; } if (wqe_size) *wqe_size += wr->num_sge; return data_size; } static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev, struct qedr_qp *qp, struct rdma_sq_rdma_wqe_1st *rwqe, struct rdma_sq_rdma_wqe_2nd *rwqe2, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey); DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr); if (wr->send_flags & IB_SEND_INLINE && (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || wr->opcode == IB_WR_RDMA_WRITE)) { u8 flags = 0; SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1); return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr, bad_wr, &rwqe->flags, flags); } return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr); } static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev, struct qedr_qp *qp, struct rdma_sq_send_wqe_1st *swqe, struct rdma_sq_send_wqe_2st *swqe2, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { memset(swqe2, 0, sizeof(*swqe2)); if (wr->send_flags & IB_SEND_INLINE) { u8 flags = 0; SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1); return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr, bad_wr, &swqe->flags, flags); } return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr); } static int qedr_prepare_reg(struct qedr_qp *qp, struct rdma_sq_fmr_wqe_1st *fwqe1, const struct ib_reg_wr *wr) { struct qedr_mr *mr = get_qedr_mr(wr->mr); struct rdma_sq_fmr_wqe_2nd *fwqe2; fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl); fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova); fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova); fwqe1->l_key = wr->key; fwqe2->access_ctrl = 0; SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ, !!(wr->access & IB_ACCESS_REMOTE_READ)); SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE, !!(wr->access & IB_ACCESS_REMOTE_WRITE)); SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC, !!(wr->access & IB_ACCESS_REMOTE_ATOMIC)); SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1); SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE, !!(wr->access & IB_ACCESS_LOCAL_WRITE)); fwqe2->fmr_ctrl = 0; SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG, ilog2(mr->ibmr.page_size) - 12); fwqe2->length_hi = 0; fwqe2->length_lo = mr->ibmr.length; fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa); fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa); qp->wqe_wr_id[qp->sq.prod].mr = mr; return 0; } static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) { switch (opcode) { case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: return IB_WC_RDMA_WRITE; case IB_WR_SEND_WITH_IMM: case IB_WR_SEND: case IB_WR_SEND_WITH_INV: return IB_WC_SEND; case IB_WR_RDMA_READ: case IB_WR_RDMA_READ_WITH_INV: return IB_WC_RDMA_READ; case IB_WR_ATOMIC_CMP_AND_SWP: return IB_WC_COMP_SWAP; case IB_WR_ATOMIC_FETCH_AND_ADD: return IB_WC_FETCH_ADD; case IB_WR_REG_MR: return IB_WC_REG_MR; case IB_WR_LOCAL_INV: 
return IB_WC_LOCAL_INV; default: return IB_WC_SEND; } } static inline bool qedr_can_post_send(struct qedr_qp *qp, const struct ib_send_wr *wr) { int wq_is_full, err_wr, pbl_is_full; struct qedr_dev *dev = qp->dev; /* prevent SQ overflow and/or processing of a bad WR */ err_wr = wr->num_sge > qp->sq.max_sges; wq_is_full = qedr_wq_is_full(&qp->sq); pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) < QEDR_MAX_SQE_ELEMENTS_PER_SQE; if (wq_is_full || err_wr || pbl_is_full) { if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) { DP_ERR(dev, "error: WQ is full. Post send on QP %p failed (this error appears only once)\n", qp); qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL; } if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) { DP_ERR(dev, "error: WR is bad. Post send on QP %p failed (this error appears only once)\n", qp); qp->err_bitmap |= QEDR_QP_ERR_BAD_SR; } if (pbl_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) { DP_ERR(dev, "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n", qp); qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL; } return false; } return true; } static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { struct qedr_dev *dev = get_qedr_dev(ibqp->device); struct qedr_qp *qp = get_qedr_qp(ibqp); struct rdma_sq_atomic_wqe_1st *awqe1; struct rdma_sq_atomic_wqe_2nd *awqe2; struct rdma_sq_atomic_wqe_3rd *awqe3; struct rdma_sq_send_wqe_2st *swqe2; struct rdma_sq_local_inv_wqe *iwqe; struct rdma_sq_rdma_wqe_2nd *rwqe2; struct rdma_sq_send_wqe_1st *swqe; struct rdma_sq_rdma_wqe_1st *rwqe; struct rdma_sq_fmr_wqe_1st *fwqe1; struct rdma_sq_common_wqe *wqe; u32 length; int rc = 0; bool comp; if (!qedr_can_post_send(qp, wr)) { *bad_wr = wr; return -ENOMEM; } wqe = qed_chain_produce(&qp->sq.pbl); qp->wqe_wr_id[qp->sq.prod].signaled = !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled; wqe->flags = 0; SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG, !!(wr->send_flags & IB_SEND_SOLICITED)); comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled; SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp); SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG, !!(wr->send_flags & IB_SEND_FENCE)); wqe->prev_wqe_size = qp->prev_wqe_size; qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode); switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { rc = -EINVAL; *bad_wr = wr; break; } wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; swqe = (struct rdma_sq_send_wqe_1st *)wqe; swqe->wqe_size = 2; swqe2 = qed_chain_produce(&qp->sq.pbl); swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data)); length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2, wr, bad_wr); swqe->length = cpu_to_le32(length); qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; qp->prev_wqe_size = swqe->wqe_size; qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; break; case IB_WR_SEND: wqe->req_type = RDMA_SQ_REQ_TYPE_SEND; swqe = (struct rdma_sq_send_wqe_1st *)wqe; swqe->wqe_size = 2; swqe2 = qed_chain_produce(&qp->sq.pbl); length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2, wr, bad_wr); swqe->length = cpu_to_le32(length); qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; qp->prev_wqe_size = swqe->wqe_size; qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; break; case IB_WR_SEND_WITH_INV: wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE; swqe = (struct rdma_sq_send_wqe_1st *)wqe; swqe2 = qed_chain_produce(&qp->sq.pbl); swqe->wqe_size = 2; 
swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey); length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2, wr, bad_wr); swqe->length = cpu_to_le32(length); qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; qp->prev_wqe_size = swqe->wqe_size; qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; break; case IB_WR_RDMA_WRITE_WITH_IMM: if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { rc = -EINVAL; *bad_wr = wr; break; } wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; rwqe->wqe_size = 2; rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data)); rwqe2 = qed_chain_produce(&qp->sq.pbl); length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2, wr, bad_wr); rwqe->length = cpu_to_le32(length); qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; qp->prev_wqe_size = rwqe->wqe_size; qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; break; case IB_WR_RDMA_WRITE: wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR; rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; rwqe->wqe_size = 2; rwqe2 = qed_chain_produce(&qp->sq.pbl); length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2, wr, bad_wr); rwqe->length = cpu_to_le32(length); qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; qp->prev_wqe_size = rwqe->wqe_size; qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; break; case IB_WR_RDMA_READ_WITH_INV: SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1); fallthrough; /* same is identical to RDMA READ */ case IB_WR_RDMA_READ: wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD; rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; rwqe->wqe_size = 2; rwqe2 = qed_chain_produce(&qp->sq.pbl); length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2, wr, bad_wr); rwqe->length = cpu_to_le32(length); qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; qp->prev_wqe_size = rwqe->wqe_size; qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; break; case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe; awqe1->wqe_size = 4; awqe2 = qed_chain_produce(&qp->sq.pbl); DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr); awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey); awqe3 = qed_chain_produce(&qp->sq.pbl); if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD; DMA_REGPAIR_LE(awqe3->swap_data, atomic_wr(wr)->compare_add); } else { wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP; DMA_REGPAIR_LE(awqe3->swap_data, atomic_wr(wr)->swap); DMA_REGPAIR_LE(awqe3->cmp_data, atomic_wr(wr)->compare_add); } qedr_prepare_sq_sges(qp, NULL, wr); qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size; qp->prev_wqe_size = awqe1->wqe_size; break; case IB_WR_LOCAL_INV: iwqe = (struct rdma_sq_local_inv_wqe *)wqe; iwqe->wqe_size = 1; iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE; iwqe->inv_l_key = wr->ex.invalidate_rkey; qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size; qp->prev_wqe_size = iwqe->wqe_size; break; case IB_WR_REG_MR: DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n"); wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR; fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe; fwqe1->wqe_size = 2; rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr)); if (rc) { DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc); *bad_wr = wr; break; } qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size; qp->prev_wqe_size = fwqe1->wqe_size; break; default: DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode); rc = -EINVAL; *bad_wr = wr; break; } if (*bad_wr) { u16 value; /* Restore prod to its position before * this WR was processed */ value = 
le16_to_cpu(qp->sq.db_data.data.value);
		qed_chain_set_prod(&qp->sq.pbl, value, wqe);

		/* Restore prev_wqe_size */
		qp->prev_wqe_size = wqe->prev_wqe_size;
		rc = -EINVAL;
		DP_ERR(dev, "POST SEND FAILED\n");
	}

	return rc;
}

int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		   const struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	*bad_wr = NULL;

	if (qp->qp_type == IB_QPT_GSI)
		return qedr_gsi_post_send(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->q_lock, flags);

	if (rdma_protocol_roce(&dev->ibdev, 1)) {
		if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
		    (qp->state != QED_ROCE_QP_STATE_SQD)) {
			spin_unlock_irqrestore(&qp->q_lock, flags);
			*bad_wr = wr;
			DP_DEBUG(dev, QEDR_MSG_CQ,
				 "QP in wrong state! QP icid=0x%x state %d\n",
				 qp->icid, qp->state);
			return -EINVAL;
		}
	}

	while (wr) {
		rc = __qedr_post_send(ibqp, wr, bad_wr);
		if (rc)
			break;

		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->sq);

		qp->sq.db_data.data.value++;

		wr = wr->next;
	}

	/* Trigger doorbell
	 * If there was a failure in the first WR then it will be triggered in
	 * vain. However, this is not harmful (as long as the producer value is
	 * unchanged). For performance reasons we avoid checking for this
	 * redundant doorbell.
	 *
	 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
	 * soon as we give the doorbell, we could get a completion
	 * for this wr, therefore we need to make sure that the
	 * memory is updated before giving the doorbell.
	 * During qedr_poll_cq, rmb is called before accessing the
	 * cqe. This covers for the smp_rmb as well.
	 */
	smp_wmb();
	writel(qp->sq.db_data.raw, qp->sq.db);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return rc;
}

static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
{
	u32 used;

	/* Calculate the number of elements used based on the producer
	 * count and consumer count and subtract it from the max
	 * work requests supported so that we get the elements left.
*/ used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt); return hw_srq->max_wr - used; } int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct qedr_srq *srq = get_qedr_srq(ibsrq); struct qedr_srq_hwq_info *hw_srq; struct qedr_dev *dev = srq->dev; struct qed_chain *pbl; unsigned long flags; int status = 0; u32 num_sge; spin_lock_irqsave(&srq->lock, flags); hw_srq = &srq->hw_srq; pbl = &srq->hw_srq.pbl; while (wr) { struct rdma_srq_wqe_header *hdr; int i; if (!qedr_srq_elem_left(hw_srq) || wr->num_sge > srq->hw_srq.max_sges) { DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n", hw_srq->wr_prod_cnt, atomic_read(&hw_srq->wr_cons_cnt), wr->num_sge, srq->hw_srq.max_sges); status = -ENOMEM; *bad_wr = wr; break; } hdr = qed_chain_produce(pbl); num_sge = wr->num_sge; /* Set number of sge and work request id in header */ SRQ_HDR_SET(hdr, wr->wr_id, num_sge); srq->hw_srq.wr_prod_cnt++; hw_srq->wqe_prod++; hw_srq->sge_prod++; DP_DEBUG(dev, QEDR_MSG_SRQ, "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n", wr->num_sge, hw_srq->wqe_prod, wr->wr_id); for (i = 0; i < wr->num_sge; i++) { struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl); /* Set SGE length, lkey and address */ SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr, wr->sg_list[i].length, wr->sg_list[i].lkey); DP_DEBUG(dev, QEDR_MSG_SRQ, "[%d]: len %d key %x addr %x:%x\n", i, srq_sge->length, srq_sge->l_key, srq_sge->addr.hi, srq_sge->addr.lo); hw_srq->sge_prod++; } /* Update WQE and SGE information before * updating producer. */ dma_wmb(); /* SRQ producer is 8 bytes. Need to update SGE producer index * in first 4 bytes and need to update WQE producer in * next 4 bytes. */ srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod); /* Make sure sge producer is updated first */ dma_wmb(); srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod); wr = wr->next; } DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n", qed_chain_get_elem_left(pbl)); spin_unlock_irqrestore(&srq->lock, flags); return status; } int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct qedr_qp *qp = get_qedr_qp(ibqp); struct qedr_dev *dev = qp->dev; unsigned long flags; int status = 0; if (qp->qp_type == IB_QPT_GSI) return qedr_gsi_post_recv(ibqp, wr, bad_wr); spin_lock_irqsave(&qp->q_lock, flags); while (wr) { int i; if (qed_chain_get_elem_left_u32(&qp->rq.pbl) < QEDR_MAX_RQE_ELEMENTS_PER_RQE || wr->num_sge > qp->rq.max_sges) { DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n", qed_chain_get_elem_left_u32(&qp->rq.pbl), QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge, qp->rq.max_sges); status = -ENOMEM; *bad_wr = wr; break; } for (i = 0; i < wr->num_sge; i++) { u32 flags = 0; struct rdma_rq_sge *rqe = qed_chain_produce(&qp->rq.pbl); /* First one must include the number * of SGE in the list */ if (!i) SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge); SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, wr->sg_list[i].lkey); RQ_SGE_SET(rqe, wr->sg_list[i].addr, wr->sg_list[i].length, flags); } /* Special case of no sges. FW requires between 1-4 sges... * in this case we need to post 1 sge with length zero. this is * because rdma write with immediate consumes an RQ. 
*/ if (!wr->num_sge) { u32 flags = 0; struct rdma_rq_sge *rqe = qed_chain_produce(&qp->rq.pbl); /* First one must include the number * of SGE in the list */ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0); SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1); RQ_SGE_SET(rqe, 0, 0, flags); i = 1; } qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id; qp->rqe_wr_id[qp->rq.prod].wqe_size = i; qedr_inc_sw_prod(&qp->rq); /* qp->rqe_wr_id is accessed during qedr_poll_cq, as * soon as we give the doorbell, we could get a completion * for this wr, therefore we need to make sure that the * memory is update before giving the doorbell. * During qedr_poll_cq, rmb is called before accessing the * cqe. This covers for the smp_rmb as well. */ smp_wmb(); qp->rq.db_data.data.value++; writel(qp->rq.db_data.raw, qp->rq.db); if (rdma_protocol_iwarp(&dev->ibdev, 1)) { writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2); } wr = wr->next; } spin_unlock_irqrestore(&qp->q_lock, flags); return status; } static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe) { struct rdma_cqe_requester *resp_cqe = &cqe->req; return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) == cq->pbl_toggle; } static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe) { struct rdma_cqe_requester *resp_cqe = &cqe->req; struct qedr_qp *qp; qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi, resp_cqe->qp_handle.lo, u64); return qp; } static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe) { struct rdma_cqe_requester *resp_cqe = &cqe->req; return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE); } /* Return latest CQE (needs processing) */ static union rdma_cqe *get_cqe(struct qedr_cq *cq) { return cq->latest_cqe; } /* In fmr we need to increase the number of fmr completed counter for the fmr * algorithm determining whether we can free a pbl or not. * we need to perform this whether the work request was signaled or not. 
for
 * this purpose we call this function from the condition that checks if a wr
 * should be skipped, to make sure we don't miss it (possibly this fmr
 * operation was not signaled).
 */
static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
{
	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
}

static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
		       struct qedr_cq *cq, int num_entries,
		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
		       int force)
{
	u16 cnt = 0;

	while (num_entries && qp->sq.wqe_cons != hw_cons) {
		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
			qedr_chk_if_fmr(qp);
			/* skip WC */
			goto next_cqe;
		}

		/* fill WC */
		wc->status = status;
		wc->vendor_err = 0;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->qp = &qp->ibqp;

		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;

		switch (wc->opcode) {
		case IB_WC_RDMA_WRITE:
			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
			break;
		case IB_WC_COMP_SWAP:
		case IB_WC_FETCH_ADD:
			wc->byte_len = 8;
			break;
		case IB_WC_REG_MR:
			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
			break;
		case IB_WC_RDMA_READ:
		case IB_WC_SEND:
			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
			break;
		default:
			break;
		}

		num_entries--;
		wc++;
		cnt++;
next_cqe:
		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
			qed_chain_consume(&qp->sq.pbl);
		qedr_inc_sw_cons(&qp->sq);
	}

	return cnt;
}

static int qedr_poll_cq_req(struct qedr_dev *dev,
			    struct qedr_qp *qp, struct qedr_cq *cq,
			    int num_entries, struct ib_wc *wc,
			    struct rdma_cqe_requester *req)
{
	int cnt = 0;

	switch (req->status) {
	case RDMA_CQE_REQ_STS_OK:
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_SUCCESS, 0);
		break;
	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
		if (qp->state != QED_ROCE_QP_STATE_ERR)
			DP_DEBUG(dev, QEDR_MSG_CQ,
				 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				 cq->icid, qp->icid);
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_WR_FLUSH_ERR, 1);
		break;
	default:
		/* process all WQEs before the consumer */
		qp->state = QED_ROCE_QP_STATE_ERR;
		cnt = process_req(dev, qp, cq, num_entries, wc,
				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
		wc += cnt;
		/* if we have extra WC fill it with actual error info */
		if (cnt < num_entries) {
			enum ib_wc_status wc_status;

			switch (req->status) {
			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_BAD_RESP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_LEN_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_QP_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_PROT_ERR;
				break;
			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_MW_BIND_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. 
CQ icid=0x%x, QP icid=0x%x\n", cq->icid, qp->icid); wc_status = IB_WC_REM_INV_REQ_ERR; break; case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR: DP_ERR(dev, "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n", cq->icid, qp->icid); wc_status = IB_WC_REM_ACCESS_ERR; break; case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR: DP_ERR(dev, "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n", cq->icid, qp->icid); wc_status = IB_WC_REM_OP_ERR; break; case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR: DP_ERR(dev, "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n", cq->icid, qp->icid); wc_status = IB_WC_RNR_RETRY_EXC_ERR; break; case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR: DP_ERR(dev, "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n", cq->icid, qp->icid); wc_status = IB_WC_RETRY_EXC_ERR; break; default: DP_ERR(dev, "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n", cq->icid, qp->icid); wc_status = IB_WC_GENERAL_ERR; } cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons, wc_status, 1); } } return cnt; } static inline int qedr_cqe_resp_status_to_ib(u8 status) { switch (status) { case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR: return IB_WC_LOC_ACCESS_ERR; case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR: return IB_WC_LOC_LEN_ERR; case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR: return IB_WC_LOC_QP_OP_ERR; case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR: return IB_WC_LOC_PROT_ERR; case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR: return IB_WC_MW_BIND_ERR; case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR: return IB_WC_REM_INV_RD_REQ_ERR; case RDMA_CQE_RESP_STS_OK: return IB_WC_SUCCESS; default: return IB_WC_GENERAL_ERR; } } static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp, struct ib_wc *wc) { wc->status = IB_WC_SUCCESS; wc->byte_len = le32_to_cpu(resp->length); if (resp->flags & QEDR_RESP_IMM) { wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key)); wc->wc_flags |= IB_WC_WITH_IMM; if (resp->flags & QEDR_RESP_RDMA) wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; if (resp->flags & QEDR_RESP_INV) return -EINVAL; } else if (resp->flags & QEDR_RESP_INV) { wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key); wc->wc_flags |= IB_WC_WITH_INVALIDATE; if (resp->flags & QEDR_RESP_RDMA) return -EINVAL; } else if (resp->flags & QEDR_RESP_RDMA) { return -EINVAL; } return 0; } static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, struct ib_wc *wc, struct rdma_cqe_responder *resp, u64 wr_id) { /* Must fill fields before qedr_set_ok_cqe_resp_wc() */ wc->opcode = IB_WC_RECV; wc->wc_flags = 0; if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) { if (qedr_set_ok_cqe_resp_wc(resp, wc)) DP_ERR(dev, "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n", cq, cq->icid, resp->flags); } else { wc->status = qedr_cqe_resp_status_to_ib(resp->status); if (wc->status == IB_WC_GENERAL_ERR) DP_ERR(dev, "CQ %p (icid=%d) contains an invalid CQE status %d\n", cq, cq->icid, resp->status); } /* Fill the rest of the WC */ wc->vendor_err = 0; wc->src_qp = qp->id; wc->qp = &qp->ibqp; wc->wr_id = wr_id; } static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, struct ib_wc *wc, struct rdma_cqe_responder *resp) { struct qedr_srq *srq = qp->srq; u64 wr_id; wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi), le32_to_cpu(resp->srq_wr_id.lo), u64); if (resp->status == 
RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) { wc->status = IB_WC_WR_FLUSH_ERR; wc->vendor_err = 0; wc->wr_id = wr_id; wc->byte_len = 0; wc->src_qp = qp->id; wc->qp = &qp->ibqp; wc->wr_id = wr_id; } else { __process_resp_one(dev, qp, cq, wc, resp, wr_id); } atomic_inc(&srq->hw_srq.wr_cons_cnt); return 1; } static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, struct ib_wc *wc, struct rdma_cqe_responder *resp) { u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id; __process_resp_one(dev, qp, cq, wc, resp, wr_id); while (qp->rqe_wr_id[qp->rq.cons].wqe_size--) qed_chain_consume(&qp->rq.pbl); qedr_inc_sw_cons(&qp->rq); return 1; } static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, u16 hw_cons) { u16 cnt = 0; while (num_entries && qp->rq.wqe_cons != hw_cons) { /* fill WC */ wc->status = IB_WC_WR_FLUSH_ERR; wc->vendor_err = 0; wc->wc_flags = 0; wc->src_qp = qp->id; wc->byte_len = 0; wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id; wc->qp = &qp->ibqp; num_entries--; wc++; cnt++; while (qp->rqe_wr_id[qp->rq.cons].wqe_size--) qed_chain_consume(&qp->rq.pbl); qedr_inc_sw_cons(&qp->rq); } return cnt; } static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp, struct rdma_cqe_responder *resp, int *update) { if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) { consume_cqe(cq); *update |= 1; } } static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, struct rdma_cqe_responder *resp) { int cnt; cnt = process_resp_one_srq(dev, qp, cq, wc, resp); consume_cqe(cq); return cnt; } static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, struct rdma_cqe_responder *resp, int *update) { int cnt; if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) { cnt = process_resp_flush(qp, cq, num_entries, wc, resp->rq_cons_or_srq_id); try_consume_resp_cqe(cq, qp, resp, update); } else { cnt = process_resp_one(dev, qp, cq, wc, resp); consume_cqe(cq); *update |= 1; } return cnt; } static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp, struct rdma_cqe_requester *req, int *update) { if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) { consume_cqe(cq); *update |= 1; } } int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct qedr_dev *dev = get_qedr_dev(ibcq->device); struct qedr_cq *cq = get_qedr_cq(ibcq); union rdma_cqe *cqe; u32 old_cons, new_cons; unsigned long flags; int update = 0; int done = 0; if (cq->destroyed) { DP_ERR(dev, "warning: poll was invoked after destroy for cq %p (icid=%d)\n", cq, cq->icid); return 0; } if (cq->cq_type == QEDR_CQ_TYPE_GSI) return qedr_gsi_poll_cq(ibcq, num_entries, wc); spin_lock_irqsave(&cq->cq_lock, flags); cqe = cq->latest_cqe; old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); while (num_entries && is_valid_cqe(cq, cqe)) { struct qedr_qp *qp; int cnt = 0; /* prevent speculative reads of any field of CQE */ rmb(); qp = cqe_get_qp(cqe); if (!qp) { WARN(1, "Error: CQE QP pointer is NULL. 
CQE=%p\n", cqe); break; } wc->qp = &qp->ibqp; switch (cqe_get_type(cqe)) { case RDMA_CQE_TYPE_REQUESTER: cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc, &cqe->req); try_consume_req_cqe(cq, qp, &cqe->req, &update); break; case RDMA_CQE_TYPE_RESPONDER_RQ: cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc, &cqe->resp, &update); break; case RDMA_CQE_TYPE_RESPONDER_SRQ: cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries, wc, &cqe->resp); update = 1; break; case RDMA_CQE_TYPE_INVALID: default: DP_ERR(dev, "Error: invalid CQE type = %d\n", cqe_get_type(cqe)); } num_entries -= cnt; wc += cnt; done += cnt; cqe = get_cqe(cq); } new_cons = qed_chain_get_cons_idx_u32(&cq->pbl); cq->cq_cons += new_cons - old_cons; if (update) /* doorbell notifies abount latest VALID entry, * but chain already point to the next INVALID one */ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags); spin_unlock_irqrestore(&cq->cq_lock, flags); return done; } int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags, u32 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad *in, struct ib_mad *out_mad, size_t *out_mad_size, u16 *out_mad_pkey_index) { return IB_MAD_RESULT_SUCCESS; }
linux-master
drivers/infiniband/hw/qedr/verbs.c
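The qedr_poll_cq() implementation above is the kernel side of completion-queue polling: it walks valid CQEs, translates hardware status codes into ib_wc status values, advances the chain consumer index, and rings the CQ doorbell. As a hedged illustration of how such a verbs provider is driven, the sketch below is a minimal userspace polling loop using the standard libibverbs ibv_poll_cq() call; the drain_cq() helper, the batch size of 16, and the error-handling policy are assumptions made for this example and are not part of the qedr driver.

/* Minimal userspace CQ polling loop (sketch: assumes libibverbs and an
 * already-created struct ibv_cq with outstanding work requests).
 */
#include <stdio.h>
#include <infiniband/verbs.h>

static int drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc[16];
	int n, i, total = 0;

	/* ibv_poll_cq() returns how many completions it wrote into wc,
	 * 0 when the CQ is currently empty, or a negative value on a
	 * provider error; it is the userspace counterpart of the
	 * driver's poll_cq verb above.
	 */
	while ((n = ibv_poll_cq(cq, 16, wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IBV_WC_SUCCESS)
				fprintf(stderr, "wr_id %llu failed: %s\n",
					(unsigned long long)wc[i].wr_id,
					ibv_wc_status_str(wc[i].status));
			total++;
		}
	}
	return n < 0 ? n : total;
}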
/* * Copyright (c) 2005 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include <linux/string.h> #include <linux/sched.h> #include <asm/io.h> #include <rdma/uverbs_ioctl.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_memfree.h" #include "mthca_wqe.h" enum { MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE }; struct mthca_tavor_srq_context { __be64 wqe_base_ds; /* low 6 bits is descriptor size */ __be32 state_pd; __be32 lkey; __be32 uar; __be16 limit_watermark; __be16 wqe_cnt; u32 reserved[2]; }; struct mthca_arbel_srq_context { __be32 state_logsize_srqn; __be32 lkey; __be32 db_index; __be32 logstride_usrpage; __be64 wqe_base; __be32 eq_pd; __be16 limit_watermark; __be16 wqe_cnt; u16 reserved1; __be16 wqe_counter; u32 reserved2[3]; }; static void *get_wqe(struct mthca_srq *srq, int n) { if (srq->is_direct) return srq->queue.direct.buf + (n << srq->wqe_shift); else return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); } /* * Return a pointer to the location within a WQE that we're using as a * link when the WQE is in the free list. We use the imm field * because in the Tavor case, posting a WQE may overwrite the next * segment of the previous WQE, but a receive WQE will never touch the * imm field. This avoids corrupting our free list if the previous * WQE has already completed and been put on the free list when we * post the next WQE. 
*/ static inline int *wqe_to_link(void *wqe) { return (int *) (wqe + offsetof(struct mthca_next_seg, imm)); } static void mthca_tavor_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct mthca_tavor_srq_context *context, struct ib_udata *udata) { struct mthca_ucontext *ucontext = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); memset(context, 0, sizeof *context); context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); context->state_pd = cpu_to_be32(pd->pd_num); context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); if (udata) context->uar = cpu_to_be32(ucontext->uar.index); else context->uar = cpu_to_be32(dev->driver_uar.index); } static void mthca_arbel_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct mthca_arbel_srq_context *context, struct ib_udata *udata) { struct mthca_ucontext *ucontext = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); int logsize, max; memset(context, 0, sizeof *context); /* * Put max in a temporary variable to work around gcc bug * triggered by ilog2() on sparc64. */ max = srq->max; logsize = ilog2(max); context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); context->db_index = cpu_to_be32(srq->db_index); context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); if (udata) context->logstride_usrpage |= cpu_to_be32(ucontext->uar.index); else context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index); context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num); } static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) { mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, srq->is_direct, &srq->mr); kfree(srq->wrid); } static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct ib_udata *udata) { struct mthca_data_seg *scatter; void *wqe; int err; int i; if (udata) return 0; srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL); if (!srq->wrid) return -ENOMEM; err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, MTHCA_MAX_DIRECT_SRQ_SIZE, &srq->queue, &srq->is_direct, pd, 1, &srq->mr); if (err) { kfree(srq->wrid); return err; } /* * Now initialize the SRQ buffer so that all of the WQEs are * linked into the list of free WQEs. In addition, set the * scatter list L_Keys to the sentry value of 0x100. 
*/ for (i = 0; i < srq->max; ++i) { struct mthca_next_seg *next; next = wqe = get_wqe(srq, i); if (i < srq->max - 1) { *wqe_to_link(wqe) = i + 1; next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1); } else { *wqe_to_link(wqe) = -1; next->nda_op = 0; } for (scatter = wqe + sizeof (struct mthca_next_seg); (void *) scatter < wqe + (1 << srq->wqe_shift); ++scatter) scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); } srq->last = get_wqe(srq, srq->max - 1); return 0; } int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, struct ib_srq_attr *attr, struct mthca_srq *srq, struct ib_udata *udata) { struct mthca_mailbox *mailbox; int ds; int err; /* Sanity check SRQ size before proceeding */ if (attr->max_wr > dev->limits.max_srq_wqes || attr->max_sge > dev->limits.max_srq_sge) return -EINVAL; srq->max = attr->max_wr; srq->max_gs = attr->max_sge; srq->counter = 0; if (mthca_is_memfree(dev)) srq->max = roundup_pow_of_two(srq->max + 1); else srq->max = srq->max + 1; ds = max(64UL, roundup_pow_of_two(sizeof (struct mthca_next_seg) + srq->max_gs * sizeof (struct mthca_data_seg))); if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz)) return -EINVAL; srq->wqe_shift = ilog2(ds); srq->srqn = mthca_alloc(&dev->srq_table.alloc); if (srq->srqn == -1) return -ENOMEM; if (mthca_is_memfree(dev)) { err = mthca_table_get(dev, dev->srq_table.table, srq->srqn); if (err) goto err_out; if (!udata) { srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ, srq->srqn, &srq->db); if (srq->db_index < 0) { err = -ENOMEM; goto err_out_icm; } } } mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto err_out_db; } err = mthca_alloc_srq_buf(dev, pd, srq, udata); if (err) goto err_out_mailbox; spin_lock_init(&srq->lock); srq->refcount = 1; init_waitqueue_head(&srq->wait); mutex_init(&srq->mutex); if (mthca_is_memfree(dev)) mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata); else mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata); err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn); if (err) { mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err); goto err_out_free_buf; } spin_lock_irq(&dev->srq_table.lock); if (mthca_array_set(&dev->srq_table.srq, srq->srqn & (dev->limits.num_srqs - 1), srq)) { spin_unlock_irq(&dev->srq_table.lock); goto err_out_free_srq; } spin_unlock_irq(&dev->srq_table.lock); mthca_free_mailbox(dev, mailbox); srq->first_free = 0; srq->last_free = srq->max - 1; attr->max_wr = srq->max - 1; attr->max_sge = srq->max_gs; return 0; err_out_free_srq: err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); if (err) mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); err_out_free_buf: if (!udata) mthca_free_srq_buf(dev, srq); err_out_mailbox: mthca_free_mailbox(dev, mailbox); err_out_db: if (!udata && mthca_is_memfree(dev)) mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); err_out_icm: mthca_table_put(dev, dev->srq_table.table, srq->srqn); err_out: mthca_free(&dev->srq_table.alloc, srq->srqn); return err; } static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) { int c; spin_lock_irq(&dev->srq_table.lock); c = srq->refcount; spin_unlock_irq(&dev->srq_table.lock); return c; } void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) { struct mthca_mailbox *mailbox; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { mthca_warn(dev, "No memory for mailbox to free SRQ.\n"); return; } err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); if (err) mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); 
spin_lock_irq(&dev->srq_table.lock); mthca_array_clear(&dev->srq_table.srq, srq->srqn & (dev->limits.num_srqs - 1)); --srq->refcount; spin_unlock_irq(&dev->srq_table.lock); wait_event(srq->wait, !get_srq_refcount(dev, srq)); if (!srq->ibsrq.uobject) { mthca_free_srq_buf(dev, srq); if (mthca_is_memfree(dev)) mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); } mthca_table_put(dev, dev->srq_table.table, srq->srqn); mthca_free(&dev->srq_table.alloc, srq->srqn); mthca_free_mailbox(dev, mailbox); } int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibsrq->device); struct mthca_srq *srq = to_msrq(ibsrq); int ret = 0; /* We don't support resizing SRQs (yet?) */ if (attr_mask & IB_SRQ_MAX_WR) return -EINVAL; if (attr_mask & IB_SRQ_LIMIT) { u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max; if (attr->srq_limit > max_wr) return -EINVAL; mutex_lock(&srq->mutex); ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit); mutex_unlock(&srq->mutex); } return ret; } int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) { struct mthca_dev *dev = to_mdev(ibsrq->device); struct mthca_srq *srq = to_msrq(ibsrq); struct mthca_mailbox *mailbox; struct mthca_arbel_srq_context *arbel_ctx; struct mthca_tavor_srq_context *tavor_ctx; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox); if (err) goto out; if (mthca_is_memfree(dev)) { arbel_ctx = mailbox->buf; srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark); } else { tavor_ctx = mailbox->buf; srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark); } srq_attr->max_wr = srq->max - 1; srq_attr->max_sge = srq->max_gs; out: mthca_free_mailbox(dev, mailbox); return err; } void mthca_srq_event(struct mthca_dev *dev, u32 srqn, enum ib_event_type event_type) { struct mthca_srq *srq; struct ib_event event; spin_lock(&dev->srq_table.lock); srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); if (srq) ++srq->refcount; spin_unlock(&dev->srq_table.lock); if (!srq) { mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn); return; } if (!srq->ibsrq.event_handler) goto out; event.device = &dev->ib_dev; event.event = event_type; event.element.srq = &srq->ibsrq; srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); out: spin_lock(&dev->srq_table.lock); if (!--srq->refcount) wake_up(&srq->wait); spin_unlock(&dev->srq_table.lock); } /* * This function must be called with IRQs disabled. 
*/ void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) { int ind; struct mthca_next_seg *last_free; ind = wqe_addr >> srq->wqe_shift; spin_lock(&srq->lock); last_free = get_wqe(srq, srq->last_free); *wqe_to_link(last_free) = ind; last_free->nda_op = htonl((ind << srq->wqe_shift) | 1); *wqe_to_link(get_wqe(srq, ind)) = -1; srq->last_free = ind; spin_unlock(&srq->lock); } int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibsrq->device); struct mthca_srq *srq = to_msrq(ibsrq); unsigned long flags; int err = 0; int first_ind; int ind; int next_ind; int nreq; int i; void *wqe; void *prev_wqe; spin_lock_irqsave(&srq->lock, flags); first_ind = srq->first_free; for (nreq = 0; wr; wr = wr->next) { ind = srq->first_free; wqe = get_wqe(srq, ind); next_ind = *wqe_to_link(wqe); if (unlikely(next_ind < 0)) { mthca_err(dev, "SRQ %06x full\n", srq->srqn); err = -ENOMEM; *bad_wr = wr; break; } prev_wqe = srq->last; srq->last = wqe; ((struct mthca_next_seg *) wqe)->ee_nds = 0; /* flags field will always remain 0 */ wqe += sizeof (struct mthca_next_seg); if (unlikely(wr->num_sge > srq->max_gs)) { err = -EINVAL; *bad_wr = wr; srq->last = prev_wqe; break; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); } if (i < srq->max_gs) mthca_set_data_seg_inval(wqe); ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD); srq->wrid[ind] = wr->wr_id; srq->first_free = next_ind; ++nreq; if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { nreq = 0; /* * Make sure that descriptors are written * before doorbell is rung. */ wmb(); mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); first_ind = srq->first_free; } } if (likely(nreq)) { /* * Make sure that descriptors are written before * doorbell is rung. */ wmb(); mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } spin_unlock_irqrestore(&srq->lock, flags); return err; } int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibsrq->device); struct mthca_srq *srq = to_msrq(ibsrq); unsigned long flags; int err = 0; int ind; int next_ind; int nreq; int i; void *wqe; spin_lock_irqsave(&srq->lock, flags); for (nreq = 0; wr; ++nreq, wr = wr->next) { ind = srq->first_free; wqe = get_wqe(srq, ind); next_ind = *wqe_to_link(wqe); if (unlikely(next_ind < 0)) { mthca_err(dev, "SRQ %06x full\n", srq->srqn); err = -ENOMEM; *bad_wr = wr; break; } ((struct mthca_next_seg *) wqe)->ee_nds = 0; /* flags field will always remain 0 */ wqe += sizeof (struct mthca_next_seg); if (unlikely(wr->num_sge > srq->max_gs)) { err = -EINVAL; *bad_wr = wr; break; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); } if (i < srq->max_gs) mthca_set_data_seg_inval(wqe); srq->wrid[ind] = wr->wr_id; srq->first_free = next_ind; } if (likely(nreq)) { srq->counter += nreq; /* * Make sure that descriptors are written before * we write doorbell record. 
*/ wmb(); *srq->db = cpu_to_be32(srq->counter); } spin_unlock_irqrestore(&srq->lock, flags); return err; } int mthca_max_srq_sge(struct mthca_dev *dev) { if (mthca_is_memfree(dev)) return dev->limits.max_sg; /* * SRQ allocations are based on powers of 2 for Tavor, * (although they only need to be multiples of 16 bytes). * * Therefore, we need to base the max number of sg entries on * the largest power of 2 descriptor size that is <= to the * actual max WQE descriptor size, rather than return the * max_sg value given by the firmware (which is based on WQE * sizes as multiples of 16, not powers of 2). * * If SRQ implementation is changed for Tavor to be based on * multiples of 16, the calculation below can be deleted and * the FW max_sg value returned. */ return min_t(int, dev->limits.max_sg, ((1 << (fls(dev->limits.max_desc_sz) - 1)) - sizeof (struct mthca_next_seg)) / sizeof (struct mthca_data_seg)); } int mthca_init_srq_table(struct mthca_dev *dev) { int err; if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) return 0; spin_lock_init(&dev->srq_table.lock); err = mthca_alloc_init(&dev->srq_table.alloc, dev->limits.num_srqs, dev->limits.num_srqs - 1, dev->limits.reserved_srqs); if (err) return err; err = mthca_array_init(&dev->srq_table.srq, dev->limits.num_srqs); if (err) mthca_alloc_cleanup(&dev->srq_table.alloc); return err; } void mthca_cleanup_srq_table(struct mthca_dev *dev) { if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) return; mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); mthca_alloc_cleanup(&dev->srq_table.alloc); }
linux-master
drivers/infiniband/hw/mthca/mthca_srq.c
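mthca_srq.c above threads the SRQ's free receive WQEs into a singly linked free list stored inside the WQEs themselves: wqe_to_link() reuses the imm field as the link, mthca_alloc_srq_buf() chains every entry at setup, posting consumes from first_free, and mthca_free_srq_wqe() appends at last_free. The standalone sketch below models the same pattern with an ordinary integer array so the invariants are easier to see; the slot count, function names, and the array standing in for the WQE ring are all hypothetical, chosen only for illustration.

/* Free list threaded through fixed-size slots, modelling the SRQ WQE
 * ring above: each free slot stores the index of the next free slot
 * and -1 terminates the list.  All names here are hypothetical.
 */
#include <stdio.h>

#define NSLOTS 8

static int next_free[NSLOTS];		/* stands in for *wqe_to_link(wqe) */
static int first_free, last_free;

static void init_slots(void)		/* like mthca_alloc_srq_buf() chaining */
{
	int i;

	for (i = 0; i < NSLOTS - 1; i++)
		next_free[i] = i + 1;
	next_free[NSLOTS - 1] = -1;
	first_free = 0;
	last_free = NSLOTS - 1;
}

static int take_slot(void)		/* like posting a receive WQE */
{
	int ind = first_free;

	if (next_free[ind] < 0)
		return -1;		/* "SRQ full": keep one spare, as the driver does */
	first_free = next_free[ind];
	return ind;
}

static void put_slot(int ind)		/* like mthca_free_srq_wqe() on completion */
{
	next_free[last_free] = ind;	/* append behind the current tail */
	next_free[ind] = -1;		/* new tail terminates the list */
	last_free = ind;
}

int main(void)
{
	int a, b;

	init_slots();
	a = take_slot();
	b = take_slot();
	printf("took slots %d and %d\n", a, b);
	put_slot(a);			/* slot a is reusable after its completion */
	return 0;
}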
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <asm/page.h> /* PAGE_SHIFT */ #include "mthca_dev.h" #include "mthca_memfree.h" int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar) { uar->index = mthca_alloc(&dev->uar_table.alloc); if (uar->index == -1) return -ENOMEM; uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; return 0; } void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar) { mthca_free(&dev->uar_table.alloc, uar->index); } int mthca_init_uar_table(struct mthca_dev *dev) { int ret; ret = mthca_alloc_init(&dev->uar_table.alloc, dev->limits.num_uars, dev->limits.num_uars - 1, dev->limits.reserved_uars + 1); if (ret) return ret; ret = mthca_init_db_tab(dev); if (ret) mthca_alloc_cleanup(&dev->uar_table.alloc); return ret; } void mthca_cleanup_uar_table(struct mthca_dev *dev) { mthca_cleanup_db_tab(dev); /* XXX check if any UARs are still allocated? */ mthca_alloc_cleanup(&dev->uar_table.alloc); }
linux-master
drivers/infiniband/hw/mthca/mthca_uar.c
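mthca_uar.c above hands out User Access Region (doorbell) pages by allocating a small index and converting it to a page frame number inside PCI BAR 2: uar->pfn = (pci_resource_start(pdev, 2) >> PAGE_SHIFT) + uar->index. The sketch below spells out that base-plus-index paging arithmetic with made-up numbers; the BAR base address, the 4 KB page size, and the chosen index are assumptions for the example only.

/* UAR index -> physical doorbell page, mirroring
 * uar->pfn = (pci_resource_start(pdev, 2) >> PAGE_SHIFT) + uar->index.
 * The numbers below are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KB pages */

int main(void)
{
	uint64_t bar2_base = 0xf8000000ULL;	/* hypothetical BAR 2 start */
	unsigned int index = 5;			/* index handed out by the allocator */
	uint64_t pfn, phys;

	pfn  = (bar2_base >> PAGE_SHIFT) + index;
	phys = pfn << PAGE_SHIFT;	/* the page later ioremap()ed or mmap()ed */

	printf("UAR %u is the doorbell page at physical 0x%llx\n",
	       index, (unsigned long long)phys);
	return 0;
}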
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/gfp.h> #include "mthca_dev.h" #include "mthca_config_reg.h" #include "mthca_cmd.h" #include "mthca_profile.h" #include "mthca_memfree.h" #include "mthca_wqe.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver"); MODULE_LICENSE("Dual BSD/GPL"); #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG int mthca_debug_level = 0; module_param_named(debug_level, mthca_debug_level, int, 0644); MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); #endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */ #ifdef CONFIG_PCI_MSI static int msi_x = 1; module_param(msi_x, int, 0444); MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); #else /* CONFIG_PCI_MSI */ #define msi_x (0) #endif /* CONFIG_PCI_MSI */ static int tune_pci = 0; module_param(tune_pci, int, 0444); MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero"); DEFINE_MUTEX(mthca_device_mutex); #define MTHCA_DEFAULT_NUM_QP (1 << 16) #define MTHCA_DEFAULT_RDB_PER_QP (1 << 2) #define MTHCA_DEFAULT_NUM_CQ (1 << 16) #define MTHCA_DEFAULT_NUM_MCG (1 << 13) #define MTHCA_DEFAULT_NUM_MPT (1 << 17) #define MTHCA_DEFAULT_NUM_MTT (1 << 20) #define MTHCA_DEFAULT_NUM_UDAV (1 << 15) #define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18) #define MTHCA_DEFAULT_NUM_UARC_SIZE (1 << 18) static struct mthca_profile hca_profile = { .num_qp = MTHCA_DEFAULT_NUM_QP, .rdb_per_qp = MTHCA_DEFAULT_RDB_PER_QP, .num_cq = MTHCA_DEFAULT_NUM_CQ, .num_mcg = MTHCA_DEFAULT_NUM_MCG, .num_mpt = MTHCA_DEFAULT_NUM_MPT, .num_mtt = MTHCA_DEFAULT_NUM_MTT, .num_udav = MTHCA_DEFAULT_NUM_UDAV, /* Tavor only */ .fmr_reserved_mtts = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */ .uarc_size = MTHCA_DEFAULT_NUM_UARC_SIZE, /* Arbel only */ }; module_param_named(num_qp, hca_profile.num_qp, int, 0444); MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA"); module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444); 
MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP"); module_param_named(num_cq, hca_profile.num_cq, int, 0444); MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA"); module_param_named(num_mcg, hca_profile.num_mcg, int, 0444); MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA"); module_param_named(num_mpt, hca_profile.num_mpt, int, 0444); MODULE_PARM_DESC(num_mpt, "maximum number of memory protection table entries per HCA"); module_param_named(num_mtt, hca_profile.num_mtt, int, 0444); MODULE_PARM_DESC(num_mtt, "maximum number of memory translation table segments per HCA"); module_param_named(num_udav, hca_profile.num_udav, int, 0444); MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA"); module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444); MODULE_PARM_DESC(fmr_reserved_mtts, "number of memory translation table segments reserved for FMR"); static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)"); static char mthca_version[] = DRV_NAME ": Mellanox InfiniBand HCA driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; static int mthca_tune_pci(struct mthca_dev *mdev) { if (!tune_pci) return 0; /* First try to max out Read Byte Count */ if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) { if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) { mthca_err(mdev, "Couldn't set PCI-X max read count, " "aborting.\n"); return -ENODEV; } } else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) mthca_info(mdev, "No PCI-X capability, not setting RBC.\n"); if (pci_is_pcie(mdev->pdev)) { if (pcie_set_readrq(mdev->pdev, 4096)) { mthca_err(mdev, "Couldn't write PCI Express read request, " "aborting.\n"); return -ENODEV; } } else if (mdev->mthca_flags & MTHCA_FLAG_PCIE) mthca_info(mdev, "No PCI Express capability, " "not setting Max Read Request Size.\n"); return 0; } static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) { int err; mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8; err = mthca_QUERY_DEV_LIM(mdev, dev_lim); if (err) { mthca_err(mdev, "QUERY_DEV_LIM command returned %d" ", aborting.\n", err); return err; } if (dev_lim->min_page_sz > PAGE_SIZE) { mthca_err(mdev, "HCA minimum page size of %d bigger than " "kernel PAGE_SIZE of %ld, aborting.\n", dev_lim->min_page_sz, PAGE_SIZE); return -ENODEV; } if (dev_lim->num_ports > MTHCA_MAX_PORTS) { mthca_err(mdev, "HCA has %d ports, but we only support %d, " "aborting.\n", dev_lim->num_ports, MTHCA_MAX_PORTS); return -ENODEV; } if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) { mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than " "PCI resource 2 size of 0x%llx, aborting.\n", dev_lim->uar_size, (unsigned long long)pci_resource_len(mdev->pdev, 2)); return -ENODEV; } mdev->limits.num_ports = dev_lim->num_ports; mdev->limits.vl_cap = dev_lim->max_vl; mdev->limits.mtu_cap = dev_lim->max_mtu; mdev->limits.gid_table_len = dev_lim->max_gids; mdev->limits.pkey_table_len = dev_lim->max_pkeys; mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay; /* * Need to allow for worst case send WQE overhead and check * whether max_desc_sz imposes a lower limit than max_sg; UD * send has the biggest overhead. */ mdev->limits.max_sg = min_t(int, dev_lim->max_sg, (dev_lim->max_desc_sz - sizeof (struct mthca_next_seg) - (mthca_is_memfree(mdev) ? 
sizeof (struct mthca_arbel_ud_seg) : sizeof (struct mthca_tavor_ud_seg))) / sizeof (struct mthca_data_seg)); mdev->limits.max_wqes = dev_lim->max_qp_sz; mdev->limits.max_qp_init_rdma = dev_lim->max_requester_per_qp; mdev->limits.reserved_qps = dev_lim->reserved_qps; mdev->limits.max_srq_wqes = dev_lim->max_srq_sz; mdev->limits.reserved_srqs = dev_lim->reserved_srqs; mdev->limits.reserved_eecs = dev_lim->reserved_eecs; mdev->limits.max_desc_sz = dev_lim->max_desc_sz; mdev->limits.max_srq_sge = mthca_max_srq_sge(mdev); /* * Subtract 1 from the limit because we need to allocate a * spare CQE so the HCA HW can tell the difference between an * empty CQ and a full CQ. */ mdev->limits.max_cqes = dev_lim->max_cq_sz - 1; mdev->limits.reserved_cqs = dev_lim->reserved_cqs; mdev->limits.reserved_eqs = dev_lim->reserved_eqs; mdev->limits.reserved_mtts = dev_lim->reserved_mtts; mdev->limits.reserved_mrws = dev_lim->reserved_mrws; mdev->limits.reserved_uars = dev_lim->reserved_uars; mdev->limits.reserved_pds = dev_lim->reserved_pds; mdev->limits.port_width_cap = dev_lim->max_port_width; mdev->limits.page_size_cap = ~(u32) (dev_lim->min_page_sz - 1); mdev->limits.flags = dev_lim->flags; /* * For old FW that doesn't return static rate support, use a * value of 0x3 (only static rate values of 0 or 1 are handled), * except on Sinai, where even old FW can handle static rate * values of 2 and 3. */ if (dev_lim->stat_rate_support) mdev->limits.stat_rate_support = dev_lim->stat_rate_support; else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT) mdev->limits.stat_rate_support = 0xf; else mdev->limits.stat_rate_support = 0x3; /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. May be doable since hardware supports it for SRQ. IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver. IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not supported by driver. 
*/ mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN; if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR) mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR) mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI) mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI; if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG) mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE) mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; if (dev_lim->flags & DEV_LIM_FLAG_SRQ) mdev->mthca_flags |= MTHCA_FLAG_SRQ; if (mthca_is_memfree(mdev)) if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM) mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; return 0; } static int mthca_init_tavor(struct mthca_dev *mdev) { s64 size; int err; struct mthca_dev_lim dev_lim; struct mthca_profile profile; struct mthca_init_hca_param init_hca; err = mthca_SYS_EN(mdev); if (err) { mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err); return err; } err = mthca_QUERY_FW(mdev); if (err) { mthca_err(mdev, "QUERY_FW command returned %d," " aborting.\n", err); goto err_disable; } err = mthca_QUERY_DDR(mdev); if (err) { mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err); goto err_disable; } err = mthca_dev_lim(mdev, &dev_lim); if (err) { mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err); goto err_disable; } profile = hca_profile; profile.num_uar = dev_lim.uar_size / PAGE_SIZE; profile.uarc_size = 0; if (mdev->mthca_flags & MTHCA_FLAG_SRQ) profile.num_srq = dev_lim.max_srqs; size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); if (size < 0) { err = size; goto err_disable; } err = mthca_INIT_HCA(mdev, &init_hca); if (err) { mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err); goto err_disable; } return 0; err_disable: mthca_SYS_DIS(mdev); return err; } static int mthca_load_fw(struct mthca_dev *mdev) { int err; /* FIXME: use HCA-attached memory for FW if present */ mdev->fw.arbel.fw_icm = mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!mdev->fw.arbel.fw_icm) { mthca_err(mdev, "Couldn't allocate FW area, aborting.\n"); return -ENOMEM; } err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm); if (err) { mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err); goto err_free; } err = mthca_RUN_FW(mdev); if (err) { mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err); goto err_unmap_fa; } return 0; err_unmap_fa: mthca_UNMAP_FA(mdev); err_free: mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); return err; } static int mthca_init_icm(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim, struct mthca_init_hca_param *init_hca, u64 icm_size) { u64 aux_pages; int err; err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages); if (err) { mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err); return err; } mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n", (unsigned long long) icm_size >> 10, (unsigned long long) aux_pages << 2); mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!mdev->fw.arbel.aux_icm) { mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n"); return -ENOMEM; } err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm); if (err) { mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err); goto err_free_aux; } err = 
mthca_map_eq_icm(mdev, init_hca->eqc_base); if (err) { mthca_err(mdev, "Failed to map EQ context memory, aborting.\n"); goto err_unmap_aux; } /* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */ mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size, dma_get_cache_alignment()) / mdev->limits.mtt_seg_size; mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base, mdev->limits.mtt_seg_size, mdev->limits.num_mtt_segs, mdev->limits.reserved_mtts, 1, 0); if (!mdev->mr_table.mtt_table) { mthca_err(mdev, "Failed to map MTT context memory, aborting.\n"); err = -ENOMEM; goto err_unmap_eq; } mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base, dev_lim->mpt_entry_sz, mdev->limits.num_mpts, mdev->limits.reserved_mrws, 1, 1); if (!mdev->mr_table.mpt_table) { mthca_err(mdev, "Failed to map MPT context memory, aborting.\n"); err = -ENOMEM; goto err_unmap_mtt; } mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base, dev_lim->qpc_entry_sz, mdev->limits.num_qps, mdev->limits.reserved_qps, 0, 0); if (!mdev->qp_table.qp_table) { mthca_err(mdev, "Failed to map QP context memory, aborting.\n"); err = -ENOMEM; goto err_unmap_mpt; } mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base, dev_lim->eqpc_entry_sz, mdev->limits.num_qps, mdev->limits.reserved_qps, 0, 0); if (!mdev->qp_table.eqp_table) { mthca_err(mdev, "Failed to map EQP context memory, aborting.\n"); err = -ENOMEM; goto err_unmap_qp; } mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base, MTHCA_RDB_ENTRY_SIZE, mdev->limits.num_qps << mdev->qp_table.rdb_shift, 0, 0, 0); if (!mdev->qp_table.rdb_table) { mthca_err(mdev, "Failed to map RDB context memory, aborting\n"); err = -ENOMEM; goto err_unmap_eqp; } mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, dev_lim->cqc_entry_sz, mdev->limits.num_cqs, mdev->limits.reserved_cqs, 0, 0); if (!mdev->cq_table.table) { mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); err = -ENOMEM; goto err_unmap_rdb; } if (mdev->mthca_flags & MTHCA_FLAG_SRQ) { mdev->srq_table.table = mthca_alloc_icm_table(mdev, init_hca->srqc_base, dev_lim->srq_entry_sz, mdev->limits.num_srqs, mdev->limits.reserved_srqs, 0, 0); if (!mdev->srq_table.table) { mthca_err(mdev, "Failed to map SRQ context memory, " "aborting.\n"); err = -ENOMEM; goto err_unmap_cq; } } /* * It's not strictly required, but for simplicity just map the * whole multicast group table now. The table isn't very big * and it's a lot easier than trying to track ref counts. 
*/ mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base, MTHCA_MGM_ENTRY_SIZE, mdev->limits.num_mgms + mdev->limits.num_amgms, mdev->limits.num_mgms + mdev->limits.num_amgms, 0, 0); if (!mdev->mcg_table.table) { mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); err = -ENOMEM; goto err_unmap_srq; } return 0; err_unmap_srq: if (mdev->mthca_flags & MTHCA_FLAG_SRQ) mthca_free_icm_table(mdev, mdev->srq_table.table); err_unmap_cq: mthca_free_icm_table(mdev, mdev->cq_table.table); err_unmap_rdb: mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); err_unmap_eqp: mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); err_unmap_qp: mthca_free_icm_table(mdev, mdev->qp_table.qp_table); err_unmap_mpt: mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); err_unmap_mtt: mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); err_unmap_eq: mthca_unmap_eq_icm(mdev); err_unmap_aux: mthca_UNMAP_ICM_AUX(mdev); err_free_aux: mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0); return err; } static void mthca_free_icms(struct mthca_dev *mdev) { mthca_free_icm_table(mdev, mdev->mcg_table.table); if (mdev->mthca_flags & MTHCA_FLAG_SRQ) mthca_free_icm_table(mdev, mdev->srq_table.table); mthca_free_icm_table(mdev, mdev->cq_table.table); mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); mthca_free_icm_table(mdev, mdev->qp_table.qp_table); mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); mthca_unmap_eq_icm(mdev); mthca_UNMAP_ICM_AUX(mdev); mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0); } static int mthca_init_arbel(struct mthca_dev *mdev) { struct mthca_dev_lim dev_lim; struct mthca_profile profile; struct mthca_init_hca_param init_hca; s64 icm_size; int err; err = mthca_QUERY_FW(mdev); if (err) { mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err); return err; } err = mthca_ENABLE_LAM(mdev); if (err == -EAGAIN) { mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n"); mdev->mthca_flags |= MTHCA_FLAG_NO_LAM; } else if (err) { mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err); return err; } err = mthca_load_fw(mdev); if (err) { mthca_err(mdev, "Loading FW returned %d, aborting.\n", err); goto err_disable; } err = mthca_dev_lim(mdev, &dev_lim); if (err) { mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err); goto err_stop_fw; } profile = hca_profile; profile.num_uar = dev_lim.uar_size / PAGE_SIZE; profile.num_udav = 0; if (mdev->mthca_flags & MTHCA_FLAG_SRQ) profile.num_srq = dev_lim.max_srqs; icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); if (icm_size < 0) { err = icm_size; goto err_stop_fw; } err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size); if (err) goto err_stop_fw; err = mthca_INIT_HCA(mdev, &init_hca); if (err) { mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err); goto err_free_icm; } return 0; err_free_icm: mthca_free_icms(mdev); err_stop_fw: mthca_UNMAP_FA(mdev); mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); err_disable: if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) mthca_DISABLE_LAM(mdev); return err; } static void mthca_close_hca(struct mthca_dev *mdev) { mthca_CLOSE_HCA(mdev, 0); if (mthca_is_memfree(mdev)) { mthca_free_icms(mdev); mthca_UNMAP_FA(mdev); mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) mthca_DISABLE_LAM(mdev); } else mthca_SYS_DIS(mdev); } static int mthca_init_hca(struct mthca_dev *mdev) { int err; struct 
mthca_adapter adapter; if (mthca_is_memfree(mdev)) err = mthca_init_arbel(mdev); else err = mthca_init_tavor(mdev); if (err) return err; err = mthca_QUERY_ADAPTER(mdev, &adapter); if (err) { mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err); goto err_close; } mdev->eq_table.inta_pin = adapter.inta_pin; if (!mthca_is_memfree(mdev)) mdev->rev_id = adapter.revision_id; memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); return 0; err_close: mthca_close_hca(mdev); return err; } static int mthca_setup_hca(struct mthca_dev *dev) { int err; MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock); err = mthca_init_uar_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "user access region table, aborting.\n"); return err; } err = mthca_uar_alloc(dev, &dev->driver_uar); if (err) { mthca_err(dev, "Failed to allocate driver access region, " "aborting.\n"); goto err_uar_table_free; } dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); if (!dev->kar) { mthca_err(dev, "Couldn't map kernel access region, " "aborting.\n"); err = -ENOMEM; goto err_uar_free; } err = mthca_init_pd_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "protection domain table, aborting.\n"); goto err_kar_unmap; } err = mthca_init_mr_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "memory region table, aborting.\n"); goto err_pd_table_free; } err = mthca_pd_alloc(dev, 1, &dev->driver_pd); if (err) { mthca_err(dev, "Failed to create driver PD, " "aborting.\n"); goto err_mr_table_free; } err = mthca_init_eq_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "event queue table, aborting.\n"); goto err_pd_free; } err = mthca_cmd_use_events(dev); if (err) { mthca_err(dev, "Failed to switch to event-driven " "firmware commands, aborting.\n"); goto err_eq_table_free; } err = mthca_NOP(dev); if (err) { if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { mthca_warn(dev, "NOP command failed to generate interrupt " "(IRQ %d).\n", dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector); mthca_warn(dev, "Trying again with MSI-X disabled.\n"); } else { mthca_err(dev, "NOP command failed to generate interrupt " "(IRQ %d), aborting.\n", dev->pdev->irq); mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n"); } goto err_cmd_poll; } mthca_dbg(dev, "NOP command IRQ test passed\n"); err = mthca_init_cq_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "completion queue table, aborting.\n"); goto err_cmd_poll; } err = mthca_init_srq_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "shared receive queue table, aborting.\n"); goto err_cq_table_free; } err = mthca_init_qp_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "queue pair table, aborting.\n"); goto err_srq_table_free; } err = mthca_init_av_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "address vector table, aborting.\n"); goto err_qp_table_free; } err = mthca_init_mcg_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "multicast group table, aborting.\n"); goto err_av_table_free; } return 0; err_av_table_free: mthca_cleanup_av_table(dev); err_qp_table_free: mthca_cleanup_qp_table(dev); err_srq_table_free: mthca_cleanup_srq_table(dev); err_cq_table_free: mthca_cleanup_cq_table(dev); err_cmd_poll: mthca_cmd_use_polling(dev); err_eq_table_free: mthca_cleanup_eq_table(dev); err_pd_free: mthca_pd_free(dev, &dev->driver_pd); err_mr_table_free: mthca_cleanup_mr_table(dev); err_pd_table_free: mthca_cleanup_pd_table(dev); err_kar_unmap: iounmap(dev->kar); 
err_uar_free: mthca_uar_free(dev, &dev->driver_uar); err_uar_table_free: mthca_cleanup_uar_table(dev); return err; } static int mthca_enable_msi_x(struct mthca_dev *mdev) { int err; err = pci_alloc_irq_vectors(mdev->pdev, 3, 3, PCI_IRQ_MSIX); if (err < 0) return err; mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = pci_irq_vector(mdev->pdev, 0); mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = pci_irq_vector(mdev->pdev, 1); mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector = pci_irq_vector(mdev->pdev, 2); return 0; } /* Types of supported HCA */ enum { TAVOR, /* MT23108 */ ARBEL_COMPAT, /* MT25208 in Tavor compat mode */ ARBEL_NATIVE, /* MT25208 with extended features */ SINAI /* MT25204 */ }; #define MTHCA_FW_VER(major, minor, subminor) \ (((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor)) static struct { u64 latest_fw; u32 flags; } mthca_hca_table[] = { [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 5, 0), .flags = 0 }, [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200), .flags = MTHCA_FLAG_PCIE }, [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE }, [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 2, 0), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE | MTHCA_FLAG_SINAI_OPT } }; static int __mthca_init_one(struct pci_dev *pdev, int hca_type) { int ddr_hidden = 0; int err; struct mthca_dev *mdev; printk(KERN_INFO PFX "Initializing %s\n", pci_name(pdev)); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, " "aborting.\n"); return err; } /* * Check for BARs. We expect 0: 1MB, 2: 8MB, 4: DDR (may not * be present) */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || pci_resource_len(pdev, 0) != 1 << 20) { dev_err(&pdev->dev, "Missing DCS, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Missing UAR, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) ddr_hidden = 1; err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources, " "aborting.\n"); goto err_disable_pdev; } pci_set_master(pdev); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); goto err_free_res; } /* We can handle large RDMA requests, so allow larger segments. */ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); mdev = ib_alloc_device(mthca_dev, ib_dev); if (!mdev) { dev_err(&pdev->dev, "Device struct alloc failed, " "aborting.\n"); err = -ENOMEM; goto err_free_res; } mdev->pdev = pdev; mdev->mthca_flags = mthca_hca_table[hca_type].flags; if (ddr_hidden) mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN; /* * Now reset the HCA before we touch the PCI capabilities or * attempt a firmware command, since a boot ROM may have left * the HCA in an undefined state. 
*/ err = mthca_reset(mdev); if (err) { mthca_err(mdev, "Failed to reset HCA, aborting.\n"); goto err_free_dev; } err = mthca_cmd_init(mdev); if (err) { mthca_err(mdev, "Failed to init command interface, aborting.\n"); goto err_free_dev; } err = mthca_tune_pci(mdev); if (err) goto err_cmd; err = mthca_init_hca(mdev); if (err) goto err_cmd; if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n", (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, (int) (mdev->fw_ver & 0xffff), (int) (mthca_hca_table[hca_type].latest_fw >> 32), (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff, (int) (mthca_hca_table[hca_type].latest_fw & 0xffff)); mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n"); } if (msi_x && !mthca_enable_msi_x(mdev)) mdev->mthca_flags |= MTHCA_FLAG_MSI_X; err = mthca_setup_hca(mdev); if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) { pci_free_irq_vectors(pdev); mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X; err = mthca_setup_hca(mdev); } if (err) goto err_close; err = mthca_register_device(mdev); if (err) goto err_cleanup; err = mthca_create_agents(mdev); if (err) goto err_unregister; pci_set_drvdata(pdev, mdev); mdev->hca_type = hca_type; mdev->active = true; return 0; err_unregister: mthca_unregister_device(mdev); err_cleanup: mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); mthca_pd_free(mdev, &mdev->driver_pd); mthca_cleanup_mr_table(mdev); mthca_cleanup_pd_table(mdev); mthca_cleanup_uar_table(mdev); err_close: if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) pci_free_irq_vectors(pdev); mthca_close_hca(mdev); err_cmd: mthca_cmd_cleanup(mdev); err_free_dev: ib_dealloc_device(&mdev->ib_dev); err_free_res: pci_release_regions(pdev); err_disable_pdev: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static void __mthca_remove_one(struct pci_dev *pdev) { struct mthca_dev *mdev = pci_get_drvdata(pdev); int p; if (mdev) { mthca_free_agents(mdev); mthca_unregister_device(mdev); for (p = 1; p <= mdev->limits.num_ports; ++p) mthca_CLOSE_IB(mdev, p); mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); mthca_pd_free(mdev, &mdev->driver_pd); mthca_cleanup_mr_table(mdev); mthca_cleanup_pd_table(mdev); iounmap(mdev->kar); mthca_uar_free(mdev, &mdev->driver_uar); mthca_cleanup_uar_table(mdev); mthca_close_hca(mdev); mthca_cmd_cleanup(mdev); if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) pci_free_irq_vectors(pdev); ib_dealloc_device(&mdev->ib_dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } int __mthca_restart_one(struct pci_dev *pdev) { struct mthca_dev *mdev; int hca_type; mdev = pci_get_drvdata(pdev); if (!mdev) return -ENODEV; hca_type = mdev->hca_type; __mthca_remove_one(pdev); return __mthca_init_one(pdev, hca_type); } static int mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; mutex_lock(&mthca_device_mutex); printk_once(KERN_INFO "%s", mthca_version); if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { printk(KERN_ERR PFX "%s has invalid driver data %lx\n", pci_name(pdev), id->driver_data); mutex_unlock(&mthca_device_mutex); return -ENODEV; } ret = 
__mthca_init_one(pdev, id->driver_data); mutex_unlock(&mthca_device_mutex); return ret; } static void mthca_remove_one(struct pci_dev *pdev) { mutex_lock(&mthca_device_mutex); __mthca_remove_one(pdev); mutex_unlock(&mthca_device_mutex); } static const struct pci_device_id mthca_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR), .driver_data = TAVOR }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR), .driver_data = TAVOR }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT), .driver_data = ARBEL_COMPAT }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT), .driver_data = ARBEL_COMPAT }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL), .driver_data = ARBEL_NATIVE }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL), .driver_data = ARBEL_NATIVE }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI), .driver_data = SINAI }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI), .driver_data = SINAI }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD), .driver_data = SINAI }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD), .driver_data = SINAI }, { 0, } }; MODULE_DEVICE_TABLE(pci, mthca_pci_table); static struct pci_driver mthca_driver = { .name = DRV_NAME, .id_table = mthca_pci_table, .probe = mthca_init_one, .remove = mthca_remove_one, }; static void __init __mthca_check_profile_val(const char *name, int *pval, int pval_default) { /* value must be positive and power of 2 */ int old_pval = *pval; if (old_pval <= 0) *pval = pval_default; else *pval = roundup_pow_of_two(old_pval); if (old_pval != *pval) { printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n", old_pval, name); printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, *pval); } } #define mthca_check_profile_val(name, default) \ __mthca_check_profile_val(#name, &hca_profile.name, default) static void __init mthca_validate_profile(void) { mthca_check_profile_val(num_qp, MTHCA_DEFAULT_NUM_QP); mthca_check_profile_val(rdb_per_qp, MTHCA_DEFAULT_RDB_PER_QP); mthca_check_profile_val(num_cq, MTHCA_DEFAULT_NUM_CQ); mthca_check_profile_val(num_mcg, MTHCA_DEFAULT_NUM_MCG); mthca_check_profile_val(num_mpt, MTHCA_DEFAULT_NUM_MPT); mthca_check_profile_val(num_mtt, MTHCA_DEFAULT_NUM_MTT); mthca_check_profile_val(num_udav, MTHCA_DEFAULT_NUM_UDAV); mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS); if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) { printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n", hca_profile.fmr_reserved_mtts); printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n", hca_profile.num_mtt); hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2; printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n", hca_profile.fmr_reserved_mtts); } if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n", log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8)); log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8); } } static int __init mthca_init(void) { int ret; mthca_validate_profile(); ret = mthca_catas_init(); if (ret) return ret; ret = pci_register_driver(&mthca_driver); if (ret < 0) { mthca_catas_cleanup(); return ret; } return 0; } static void __exit mthca_cleanup(void) { pci_unregister_driver(&mthca_driver); mthca_catas_cleanup(); } module_init(mthca_init); module_exit(mthca_cleanup);
linux-master
drivers/infiniband/hw/mthca/mthca_main.c
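mthca_main.c above encodes firmware versions with MTHCA_FW_VER(), packing major/minor/subminor into one 64-bit value so that a plain integer comparison against mthca_hca_table[].latest_fw orders versions correctly, and the same shifts recover the fields for the "FW version is old" warning. The sketch below reproduces that packing and unpacking as a standalone program; the version numbers used are invented for the example.

/* Firmware version triple packed into one 64-bit value: major in bits
 * 32 and up, minor in bits 16-31, subminor in bits 0-15, matching
 * MTHCA_FW_VER() above.  The versions used are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define FW_VER(major, minor, subminor) \
	(((uint64_t)(major) << 32) | ((uint64_t)(minor) << 16) | (uint64_t)(subminor))

int main(void)
{
	uint64_t running = FW_VER(4, 7, 600);
	uint64_t latest  = FW_VER(4, 8, 200);

	if (running < latest)		/* one integer compare orders the triples */
		printf("FW %d.%d.%03d is old (%d.%d.%03d is current)\n",
		       (int)(running >> 32), (int)(running >> 16) & 0xffff,
		       (int)(running & 0xffff),
		       (int)(latest >> 32), (int)(latest >> 16) & 0xffff,
		       (int)(latest & 0xffff));
	return 0;
}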
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <linux/slab.h> #include <linux/sched.h> #include <asm/io.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include <rdma/ib_pack.h> #include <rdma/uverbs_ioctl.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_memfree.h" #include "mthca_wqe.h" enum { MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, MTHCA_ACK_REQ_FREQ = 10, MTHCA_FLIGHT_LIMIT = 9, MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */ MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */ MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */ }; enum { MTHCA_QP_STATE_RST = 0, MTHCA_QP_STATE_INIT = 1, MTHCA_QP_STATE_RTR = 2, MTHCA_QP_STATE_RTS = 3, MTHCA_QP_STATE_SQE = 4, MTHCA_QP_STATE_SQD = 5, MTHCA_QP_STATE_ERR = 6, MTHCA_QP_STATE_DRAINING = 7 }; enum { MTHCA_QP_ST_RC = 0x0, MTHCA_QP_ST_UC = 0x1, MTHCA_QP_ST_RD = 0x2, MTHCA_QP_ST_UD = 0x3, MTHCA_QP_ST_MLX = 0x7 }; enum { MTHCA_QP_PM_MIGRATED = 0x3, MTHCA_QP_PM_ARMED = 0x0, MTHCA_QP_PM_REARM = 0x1 }; enum { /* qp_context flags */ MTHCA_QP_BIT_DE = 1 << 8, /* params1 */ MTHCA_QP_BIT_SRE = 1 << 15, MTHCA_QP_BIT_SWE = 1 << 14, MTHCA_QP_BIT_SAE = 1 << 13, MTHCA_QP_BIT_SIC = 1 << 4, MTHCA_QP_BIT_SSC = 1 << 3, /* params2 */ MTHCA_QP_BIT_RRE = 1 << 15, MTHCA_QP_BIT_RWE = 1 << 14, MTHCA_QP_BIT_RAE = 1 << 13, MTHCA_QP_BIT_RIC = 1 << 4, MTHCA_QP_BIT_RSC = 1 << 3 }; enum { MTHCA_SEND_DOORBELL_FENCE = 1 << 5 }; struct mthca_qp_path { __be32 port_pkey; u8 rnr_retry; u8 g_mylmc; __be16 rlid; u8 ackto; u8 mgid_index; u8 static_rate; u8 hop_limit; __be32 sl_tclass_flowlabel; u8 rgid[16]; } __packed; struct mthca_qp_context { __be32 flags; __be32 tavor_sched_queue; /* Reserved on Arbel */ u8 mtu_msgmax; u8 rq_size_stride; /* Reserved on Tavor */ u8 sq_size_stride; /* Reserved on Tavor */ u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ __be32 usr_page; __be32 local_qpn; __be32 remote_qpn; u32 reserved1[2]; struct mthca_qp_path pri_path; struct mthca_qp_path alt_path; __be32 rdd; __be32 pd; __be32 wqe_base; __be32 
wqe_lkey; __be32 params1; __be32 reserved2; __be32 next_send_psn; __be32 cqn_snd; __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ __be32 snd_db_index; /* (debugging only entries) */ __be32 last_acked_psn; __be32 ssn; __be32 params2; __be32 rnr_nextrecvpsn; __be32 ra_buff_indx; __be32 cqn_rcv; __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ __be32 rcv_db_index; /* (debugging only entries) */ __be32 qkey; __be32 srqn; __be32 rmsn; __be16 rq_wqe_counter; /* reserved on Tavor */ __be16 sq_wqe_counter; /* reserved on Tavor */ u32 reserved3[18]; } __packed; struct mthca_qp_param { __be32 opt_param_mask; u32 reserved1; struct mthca_qp_context context; u32 reserved2[62]; } __packed; enum { MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, MTHCA_QP_OPTPAR_RRE = 1 << 1, MTHCA_QP_OPTPAR_RAE = 1 << 2, MTHCA_QP_OPTPAR_RWE = 1 << 3, MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4, MTHCA_QP_OPTPAR_Q_KEY = 1 << 5, MTHCA_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, MTHCA_QP_OPTPAR_SRA_MAX = 1 << 8, MTHCA_QP_OPTPAR_RRA_MAX = 1 << 9, MTHCA_QP_OPTPAR_PM_STATE = 1 << 10, MTHCA_QP_OPTPAR_PORT_NUM = 1 << 11, MTHCA_QP_OPTPAR_RETRY_COUNT = 1 << 12, MTHCA_QP_OPTPAR_ALT_RNR_RETRY = 1 << 13, MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15, MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 }; static const u8 mthca_opcode[] = { [IB_WR_SEND] = MTHCA_OPCODE_SEND, [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, [IB_WR_RDMA_WRITE] = MTHCA_OPCODE_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = MTHCA_OPCODE_RDMA_WRITE_IMM, [IB_WR_RDMA_READ] = MTHCA_OPCODE_RDMA_READ, [IB_WR_ATOMIC_CMP_AND_SWP] = MTHCA_OPCODE_ATOMIC_CS, [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA, }; static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) { return qp->qpn >= dev->qp_table.sqp_start && qp->qpn <= dev->qp_table.sqp_start + 3; } static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) { return qp->qpn >= dev->qp_table.sqp_start && qp->qpn <= dev->qp_table.sqp_start + 1; } static void *get_recv_wqe(struct mthca_qp *qp, int n) { if (qp->is_direct) return qp->queue.direct.buf + (n << qp->rq.wqe_shift); else return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); } static void *get_send_wqe(struct mthca_qp *qp, int n) { if (qp->is_direct) return qp->queue.direct.buf + qp->send_wqe_offset + (n << qp->sq.wqe_shift); else return qp->queue.page_list[(qp->send_wqe_offset + (n << qp->sq.wqe_shift)) >> PAGE_SHIFT].buf + ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & (PAGE_SIZE - 1)); } static void mthca_wq_reset(struct mthca_wq *wq) { wq->next_ind = 0; wq->last_comp = wq->max - 1; wq->head = 0; wq->tail = 0; } void mthca_qp_event(struct mthca_dev *dev, u32 qpn, enum ib_event_type event_type) { struct mthca_qp *qp; struct ib_event event; spin_lock(&dev->qp_table.lock); qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); if (qp) ++qp->refcount; spin_unlock(&dev->qp_table.lock); if (!qp) { mthca_warn(dev, "Async event %d for bogus QP %08x\n", event_type, qpn); return; } if (event_type == IB_EVENT_PATH_MIG) qp->port = qp->alt_port; event.device = &dev->ib_dev; event.event = event_type; event.element.qp = &qp->ibqp; if (qp->ibqp.event_handler) qp->ibqp.event_handler(&event, qp->ibqp.qp_context); spin_lock(&dev->qp_table.lock); if (!--qp->refcount) wake_up(&qp->wait); spin_unlock(&dev->qp_table.lock); } static int to_mthca_state(enum ib_qp_state ib_state) { switch (ib_state) { case IB_QPS_RESET: return MTHCA_QP_STATE_RST; case 
IB_QPS_INIT: return MTHCA_QP_STATE_INIT; case IB_QPS_RTR: return MTHCA_QP_STATE_RTR; case IB_QPS_RTS: return MTHCA_QP_STATE_RTS; case IB_QPS_SQD: return MTHCA_QP_STATE_SQD; case IB_QPS_SQE: return MTHCA_QP_STATE_SQE; case IB_QPS_ERR: return MTHCA_QP_STATE_ERR; default: return -1; } } enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS }; static int to_mthca_st(int transport) { switch (transport) { case RC: return MTHCA_QP_ST_RC; case UC: return MTHCA_QP_ST_UC; case UD: return MTHCA_QP_ST_UD; case RD: return MTHCA_QP_ST_RD; case MLX: return MTHCA_QP_ST_MLX; default: return -1; } } static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr, int attr_mask) { if (attr_mask & IB_QP_PKEY_INDEX) sqp->pkey_index = attr->pkey_index; if (attr_mask & IB_QP_QKEY) sqp->qkey = attr->qkey; if (attr_mask & IB_QP_SQ_PSN) sqp->send_psn = attr->sq_psn; } static void init_port(struct mthca_dev *dev, int port) { int err; struct mthca_init_ib_param param; memset(&param, 0, sizeof param); param.port_width = dev->limits.port_width_cap; param.vl_cap = dev->limits.vl_cap; param.mtu_cap = dev->limits.mtu_cap; param.gid_cap = dev->limits.gid_table_len; param.pkey_cap = dev->limits.pkey_table_len; err = mthca_INIT_IB(dev, &param, port); if (err) mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); } static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, int attr_mask) { u8 dest_rd_atomic; u32 access_flags; u32 hw_access_flags = 0; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) dest_rd_atomic = attr->max_dest_rd_atomic; else dest_rd_atomic = qp->resp_depth; if (attr_mask & IB_QP_ACCESS_FLAGS) access_flags = attr->qp_access_flags; else access_flags = qp->atomic_rd_en; if (!dest_rd_atomic) access_flags &= IB_ACCESS_REMOTE_WRITE; if (access_flags & IB_ACCESS_REMOTE_READ) hw_access_flags |= MTHCA_QP_BIT_RRE; if (access_flags & IB_ACCESS_REMOTE_ATOMIC) hw_access_flags |= MTHCA_QP_BIT_RAE; if (access_flags & IB_ACCESS_REMOTE_WRITE) hw_access_flags |= MTHCA_QP_BIT_RWE; return cpu_to_be32(hw_access_flags); } static inline enum ib_qp_state to_ib_qp_state(int mthca_state) { switch (mthca_state) { case MTHCA_QP_STATE_RST: return IB_QPS_RESET; case MTHCA_QP_STATE_INIT: return IB_QPS_INIT; case MTHCA_QP_STATE_RTR: return IB_QPS_RTR; case MTHCA_QP_STATE_RTS: return IB_QPS_RTS; case MTHCA_QP_STATE_DRAINING: case MTHCA_QP_STATE_SQD: return IB_QPS_SQD; case MTHCA_QP_STATE_SQE: return IB_QPS_SQE; case MTHCA_QP_STATE_ERR: return IB_QPS_ERR; default: return -1; } } static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state) { switch (mthca_mig_state) { case 0: return IB_MIG_ARMED; case 1: return IB_MIG_REARM; case 3: return IB_MIG_MIGRATED; default: return -1; } } static int to_ib_qp_access_flags(int mthca_flags) { int ib_flags = 0; if (mthca_flags & MTHCA_QP_BIT_RRE) ib_flags |= IB_ACCESS_REMOTE_READ; if (mthca_flags & MTHCA_QP_BIT_RWE) ib_flags |= IB_ACCESS_REMOTE_WRITE; if (mthca_flags & MTHCA_QP_BIT_RAE) ib_flags |= IB_ACCESS_REMOTE_ATOMIC; return ib_flags; } static void to_rdma_ah_attr(struct mthca_dev *dev, struct rdma_ah_attr *ah_attr, struct mthca_qp_path *path) { u8 port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3; memset(ah_attr, 0, sizeof(*ah_attr)); if (port_num == 0 || port_num > dev->limits.num_ports) return; ah_attr->type = rdma_ah_find_type(&dev->ib_dev, port_num); rdma_ah_set_port_num(ah_attr, port_num); rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid)); rdma_ah_set_sl(ah_attr, be32_to_cpu(path->sl_tclass_flowlabel) >> 28); rdma_ah_set_path_bits(ah_attr, 
path->g_mylmc & 0x7f); rdma_ah_set_static_rate(ah_attr, mthca_rate_to_ib(dev, path->static_rate & 0xf, port_num)); if (path->g_mylmc & (1 << 7)) { u32 tc_fl = be32_to_cpu(path->sl_tclass_flowlabel); rdma_ah_set_grh(ah_attr, NULL, tc_fl & 0xfffff, path->mgid_index & (dev->limits.gid_table_len - 1), path->hop_limit, (tc_fl >> 20) & 0xff); rdma_ah_set_dgid_raw(ah_attr, path->rgid); } } int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); int err = 0; struct mthca_mailbox *mailbox = NULL; struct mthca_qp_param *qp_param; struct mthca_qp_context *context; int mthca_state; mutex_lock(&qp->mutex); if (qp->state == IB_QPS_RESET) { qp_attr->qp_state = IB_QPS_RESET; goto done; } mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto out; } err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); if (err) { mthca_warn(dev, "QUERY_QP failed (%d)\n", err); goto out_mailbox; } qp_param = mailbox->buf; context = &qp_param->context; mthca_state = be32_to_cpu(context->flags) >> 28; qp->state = to_ib_qp_state(mthca_state); qp_attr->qp_state = qp->state; qp_attr->path_mtu = context->mtu_msgmax >> 5; qp_attr->path_mig_state = to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); qp_attr->qkey = be32_to_cpu(context->qkey); qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; qp_attr->qp_access_flags = to_ib_qp_access_flags(be32_to_cpu(context->params2)); if (qp->transport == RC || qp->transport == UC) { to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f; qp_attr->alt_port_num = rdma_ah_get_port_num(&qp_attr->alt_ah_attr); } qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f; qp_attr->port_num = (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3; /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING; qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); qp_attr->max_dest_rd_atomic = 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); qp_attr->min_rnr_timer = (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; qp_attr->timeout = context->pri_path.ackto >> 3; qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; qp_attr->alt_timeout = context->alt_path.ackto >> 3; done: qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->cap.max_send_wr = qp->sq.max; qp_attr->cap.max_recv_wr = qp->rq.max; qp_attr->cap.max_send_sge = qp->sq.max_gs; qp_attr->cap.max_recv_sge = qp->rq.max_gs; qp_attr->cap.max_inline_data = qp->max_inline_data; qp_init_attr->cap = qp_attr->cap; qp_init_attr->sq_sig_type = qp->sq_policy; out_mailbox: mthca_free_mailbox(dev, mailbox); out: mutex_unlock(&qp->mutex); return err; } static int mthca_path_set(struct mthca_dev *dev, const struct rdma_ah_attr *ah, struct mthca_qp_path *path, u8 port) { path->g_mylmc = rdma_ah_get_path_bits(ah) & 0x7f; path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah)); path->static_rate = mthca_get_rate(dev, rdma_ah_get_static_rate(ah), port); if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) { const struct 
ib_global_route *grh = rdma_ah_read_grh(ah); if (grh->sgid_index >= dev->limits.gid_table_len) { mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n", grh->sgid_index, dev->limits.gid_table_len - 1); return -1; } path->g_mylmc |= 1 << 7; path->mgid_index = grh->sgid_index; path->hop_limit = grh->hop_limit; path->sl_tclass_flowlabel = cpu_to_be32((rdma_ah_get_sl(ah) << 28) | (grh->traffic_class << 20) | (grh->flow_label)); memcpy(path->rgid, grh->dgid.raw, 16); } else { path->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah) << 28); } return 0; } static int __mthca_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); struct mthca_ucontext *context = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); struct mthca_mailbox *mailbox; struct mthca_qp_param *qp_param; struct mthca_qp_context *qp_context; u32 sqd_event = 0; int err = -EINVAL; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto out; } qp_param = mailbox->buf; qp_context = &qp_param->context; memset(qp_param, 0, sizeof *qp_param); qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) | (to_mthca_st(qp->transport) << 16)); qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE); if (!(attr_mask & IB_QP_PATH_MIG_STATE)) qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); else { qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE); switch (attr->path_mig_state) { case IB_MIG_MIGRATED: qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); break; case IB_MIG_REARM: qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11); break; case IB_MIG_ARMED: qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11); break; } } /* leave tavor_sched_queue as 0 */ if (qp->transport == MLX || qp->transport == UD) qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11; else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) { mthca_dbg(dev, "path MTU (%u) is invalid\n", attr->path_mtu); goto out_mailbox; } qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; } if (mthca_is_memfree(dev)) { if (qp->rq.max) qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; if (qp->sq.max) qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; } /* leave arbel_sched_queue as 0 */ if (qp->ibqp.uobject) qp_context->usr_page = cpu_to_be32(context->uar.index); else qp_context->usr_page = cpu_to_be32(dev->driver_uar.index); qp_context->local_qpn = cpu_to_be32(qp->qpn); if (attr_mask & IB_QP_DEST_QPN) { qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num); } if (qp->transport == MLX) qp_context->pri_path.port_pkey |= cpu_to_be32(qp->port << 24); else { if (attr_mask & IB_QP_PORT) { qp_context->pri_path.port_pkey |= cpu_to_be32(attr->port_num << 24); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM); } } if (attr_mask & IB_QP_PKEY_INDEX) { qp_context->pri_path.port_pkey |= cpu_to_be32(attr->pkey_index); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX); } if (attr_mask & IB_QP_RNR_RETRY) { qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry = attr->rnr_retry << 5; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY | MTHCA_QP_OPTPAR_ALT_RNR_RETRY); } if (attr_mask & IB_QP_AV) { if 
(mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path, attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) goto out_mailbox; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); } if (ibqp->qp_type == IB_QPT_RC && cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { u8 sched_queue = ibqp->uobject ? 0x2 : 0x1; if (mthca_is_memfree(dev)) qp_context->rlkey_arbel_sched_queue |= sched_queue; else qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE); } if (attr_mask & IB_QP_TIMEOUT) { qp_context->pri_path.ackto = attr->timeout << 3; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); } if (attr_mask & IB_QP_ALT_PATH) { if (attr->alt_pkey_index >= dev->limits.pkey_table_len) { mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n", attr->alt_pkey_index, dev->limits.pkey_table_len-1); goto out_mailbox; } if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) { mthca_dbg(dev, "Alternate port number (%u) is invalid\n", attr->alt_port_num); goto out_mailbox; } if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path, rdma_ah_get_port_num(&attr->alt_ah_attr))) goto out_mailbox; qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index | attr->alt_port_num << 24); qp_context->alt_path.ackto = attr->alt_timeout << 3; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH); } /* leave rdd as 0 */ qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num); /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */ qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) | (MTHCA_FLIGHT_LIMIT << 24) | MTHCA_QP_BIT_SWE); if (qp->sq_policy == IB_SIGNAL_ALL_WR) qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC); if (attr_mask & IB_QP_RETRY_CNT) { qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT); } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic) { qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SRE | MTHCA_QP_BIT_SAE); qp_context->params1 |= cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); } qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX); } if (attr_mask & IB_QP_SQ_PSN) qp_context->next_send_psn = cpu_to_be32(attr->sq_psn); qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); if (mthca_is_memfree(dev)) { qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (attr->max_dest_rd_atomic) qp_context->params2 |= cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX); } if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | MTHCA_QP_OPTPAR_RRE | MTHCA_QP_OPTPAR_RAE); } qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); if (ibqp->srq) qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); if (attr_mask & IB_QP_MIN_RNR_TIMER) { qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); } if (attr_mask & IB_QP_RQ_PSN) qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); qp_context->ra_buff_indx = cpu_to_be32(dev->qp_table.rdb_base + ((qp->qpn & 
(dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << dev->qp_table.rdb_shift)); qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); if (mthca_is_memfree(dev)) qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); if (attr_mask & IB_QP_QKEY) { qp_context->qkey = cpu_to_be32(attr->qkey); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); } if (ibqp->srq) qp_context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->srqn); if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1 << 31; err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, mailbox, sqd_event); if (err) { mthca_warn(dev, "modify QP %d->%d returned %d.\n", cur_state, new_state, err); goto out_mailbox; } qp->state = new_state; if (attr_mask & IB_QP_ACCESS_FLAGS) qp->atomic_rd_en = attr->qp_access_flags; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) qp->resp_depth = attr->max_dest_rd_atomic; if (attr_mask & IB_QP_PORT) qp->port = attr->port_num; if (attr_mask & IB_QP_ALT_PATH) qp->alt_port = attr->alt_port_num; if (is_sqp(dev, qp)) store_attrs(qp->sqp, attr, attr_mask); /* * If we moved QP0 to RTR, bring the IB link up; if we moved * QP0 to RESET or ERROR, bring the link back down. */ if (is_qp0(dev, qp)) { if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) init_port(dev, qp->port); if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) mthca_CLOSE_IB(dev, qp->port); } /* * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); mthca_wq_reset(&qp->sq); qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); mthca_wq_reset(&qp->rq); qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); if (mthca_is_memfree(dev)) { *qp->sq.db = 0; *qp->rq.db = 0; } } out_mailbox: mthca_free_mailbox(dev, mailbox); out: return err; } int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; int err = -EINVAL; if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; mutex_lock(&qp->mutex); if (attr_mask & IB_QP_CUR_STATE) { cur_state = attr->cur_qp_state; } else { spin_lock_irq(&qp->sq.lock); spin_lock(&qp->rq.lock); cur_state = qp->state; spin_unlock(&qp->rq.lock); spin_unlock_irq(&qp->sq.lock); } new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { mthca_dbg(dev, "Bad QP transition (transport %d) " "%d->%d with attr 0x%08x\n", qp->transport, cur_state, new_state, attr_mask); goto out; } if ((attr_mask & IB_QP_PKEY_INDEX) && attr->pkey_index >= dev->limits.pkey_table_len) { mthca_dbg(dev, "P_Key index (%u) too large. 
max is %d\n", attr->pkey_index, dev->limits.pkey_table_len-1); goto out; } if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) { mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num); goto out; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->limits.max_qp_init_rdma) { mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n", attr->max_rd_atomic, dev->limits.max_qp_init_rdma); goto out; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) { mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n", attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift); goto out; } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; goto out; } err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state, udata); out: mutex_unlock(&qp->mutex); return err; } static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) { /* * Calculate the maximum size of WQE s/g segments, excluding * the next segment and other non-data segments. */ int max_data_size = desc_sz - sizeof (struct mthca_next_seg); switch (qp->transport) { case MLX: max_data_size -= 2 * sizeof (struct mthca_data_seg); break; case UD: if (mthca_is_memfree(dev)) max_data_size -= sizeof (struct mthca_arbel_ud_seg); else max_data_size -= sizeof (struct mthca_tavor_ud_seg); break; default: max_data_size -= sizeof (struct mthca_raddr_seg); break; } return max_data_size; } static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size) { /* We don't support inline data for kernel QPs (yet). */ return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0; } static void mthca_adjust_qp_caps(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) { int max_data_size = mthca_max_data_size(dev, qp, min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift)); qp->max_inline_data = mthca_max_inline_data(pd, max_data_size); qp->sq.max_gs = min_t(int, dev->limits.max_sg, max_data_size / sizeof (struct mthca_data_seg)); qp->rq.max_gs = min_t(int, dev->limits.max_sg, (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - sizeof (struct mthca_next_seg)) / sizeof (struct mthca_data_seg)); } /* * Allocate and register buffer for WQEs. qp->rq.max, sq.max, * rq.max_gs and sq.max_gs must all be assigned. * mthca_alloc_wqe_buf will calculate rq.wqe_shift and * sq.wqe_shift (as well as send_wqe_offset, is_direct, and * queue) */ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp, struct ib_udata *udata) { int size; int err = -ENOMEM; size = sizeof (struct mthca_next_seg) + qp->rq.max_gs * sizeof (struct mthca_data_seg); if (size > dev->limits.max_desc_sz) return -EINVAL; for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; qp->rq.wqe_shift++) ; /* nothing */ size = qp->sq.max_gs * sizeof (struct mthca_data_seg); switch (qp->transport) { case MLX: size += 2 * sizeof (struct mthca_data_seg); break; case UD: size += mthca_is_memfree(dev) ? sizeof (struct mthca_arbel_ud_seg) : sizeof (struct mthca_tavor_ud_seg); break; case UC: size += sizeof (struct mthca_raddr_seg); break; case RC: size += sizeof (struct mthca_raddr_seg); /* * An atomic op will require an atomic segment, a * remote address segment and one scatter entry. 
*/ size = max_t(int, size, sizeof (struct mthca_atomic_seg) + sizeof (struct mthca_raddr_seg) + sizeof (struct mthca_data_seg)); break; default: break; } /* Make sure that we have enough space for a bind request */ size = max_t(int, size, sizeof (struct mthca_bind_seg)); size += sizeof (struct mthca_next_seg); if (size > dev->limits.max_desc_sz) return -EINVAL; for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; qp->sq.wqe_shift++) ; /* nothing */ qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, 1 << qp->sq.wqe_shift); /* * If this is a userspace QP, we don't actually have to * allocate anything. All we need is to calculate the WQE * sizes and the send_wqe_offset, so we're done now. */ if (udata) return 0; size = PAGE_ALIGN(qp->send_wqe_offset + (qp->sq.max << qp->sq.wqe_shift)); qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64), GFP_KERNEL); if (!qp->wrid) goto err_out; err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, &qp->queue, &qp->is_direct, pd, 0, &qp->mr); if (err) goto err_out; return 0; err_out: kfree(qp->wrid); return err; } static void mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) { mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + (qp->sq.max << qp->sq.wqe_shift)), &qp->queue, qp->is_direct, &qp->mr); kfree(qp->wrid); } static int mthca_map_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { int ret; if (mthca_is_memfree(dev)) { ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); if (ret) return ret; ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); if (ret) goto err_qpc; ret = mthca_table_get(dev, dev->qp_table.rdb_table, qp->qpn << dev->qp_table.rdb_shift); if (ret) goto err_eqpc; } return 0; err_eqpc: mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); err_qpc: mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); return ret; } static void mthca_unmap_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { mthca_table_put(dev, dev->qp_table.rdb_table, qp->qpn << dev->qp_table.rdb_shift); mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); } static int mthca_alloc_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { if (mthca_is_memfree(dev)) { qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, qp->qpn, &qp->rq.db); if (qp->rq.db_index < 0) return -ENOMEM; qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, qp->qpn, &qp->sq.db); if (qp->sq.db_index < 0) { mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); return -ENOMEM; } } return 0; } static void mthca_free_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { if (mthca_is_memfree(dev)) { mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); } } static int mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp, struct ib_udata *udata) { int ret; int i; struct mthca_next_seg *next; qp->refcount = 1; init_waitqueue_head(&qp->wait); mutex_init(&qp->mutex); qp->state = IB_QPS_RESET; qp->atomic_rd_en = 0; qp->resp_depth = 0; qp->sq_policy = send_policy; mthca_wq_reset(&qp->sq); mthca_wq_reset(&qp->rq); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); ret = mthca_map_memfree(dev, qp); if (ret) return ret; ret = mthca_alloc_wqe_buf(dev, pd, qp, udata); if (ret) { mthca_unmap_memfree(dev, qp); return ret; } mthca_adjust_qp_caps(dev, pd, qp); /* * If this is a userspace QP, we're done now. 
The doorbells * will be allocated and buffers will be initialized in * userspace. */ if (udata) return 0; ret = mthca_alloc_memfree(dev, qp); if (ret) { mthca_free_wqe_buf(dev, qp); mthca_unmap_memfree(dev, qp); return ret; } if (mthca_is_memfree(dev)) { struct mthca_data_seg *scatter; int size = (sizeof (struct mthca_next_seg) + qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; for (i = 0; i < qp->rq.max; ++i) { next = get_recv_wqe(qp, i); next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << qp->rq.wqe_shift); next->ee_nds = cpu_to_be32(size); for (scatter = (void *) (next + 1); (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); ++scatter) scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); } for (i = 0; i < qp->sq.max; ++i) { next = get_send_wqe(qp, i); next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << qp->sq.wqe_shift) + qp->send_wqe_offset); } } else { for (i = 0; i < qp->rq.max; ++i) { next = get_recv_wqe(qp, i); next->nda_op = htonl((((i + 1) % qp->rq.max) << qp->rq.wqe_shift) | 1); } } qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); return 0; } static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, struct mthca_pd *pd, struct mthca_qp *qp) { int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); /* Sanity check QP size before proceeding */ if (cap->max_send_wr > dev->limits.max_wqes || cap->max_recv_wr > dev->limits.max_wqes || cap->max_send_sge > dev->limits.max_sg || cap->max_recv_sge > dev->limits.max_sg || cap->max_inline_data > mthca_max_inline_data(pd, max_data_size)) return -EINVAL; /* * For MLX transport we need 2 extra send gather entries: * one for the header and one for the checksum at the end */ if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg) return -EINVAL; if (mthca_is_memfree(dev)) { qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0; qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 0; } else { qp->rq.max = cap->max_recv_wr; qp->sq.max = cap->max_send_wr; } qp->rq.max_gs = cap->max_recv_sge; qp->sq.max_gs = max_t(int, cap->max_send_sge, ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE, MTHCA_INLINE_CHUNK_SIZE) / sizeof (struct mthca_data_seg)); return 0; } int mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp, struct ib_udata *udata) { int err; switch (type) { case IB_QPT_RC: qp->transport = RC; break; case IB_QPT_UC: qp->transport = UC; break; case IB_QPT_UD: qp->transport = UD; break; default: return -EINVAL; } err = mthca_set_qp_size(dev, cap, pd, qp); if (err) return err; qp->qpn = mthca_alloc(&dev->qp_table.alloc); if (qp->qpn == -1) return -ENOMEM; /* initialize port to zero for error-catching. 
*/ qp->port = 0; err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, send_policy, qp, udata); if (err) { mthca_free(&dev->qp_table.alloc, qp->qpn); return err; } spin_lock_irq(&dev->qp_table.lock); mthca_array_set(&dev->qp_table.qp, qp->qpn & (dev->limits.num_qps - 1), qp); spin_unlock_irq(&dev->qp_table.lock); return 0; } static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { if (send_cq == recv_cq) { spin_lock_irq(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { spin_lock_irq(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { spin_lock_irq(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) __releases(&send_cq->lock) __releases(&recv_cq->lock) { if (send_cq == recv_cq) { __release(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { spin_unlock(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else { spin_unlock(&send_cq->lock); spin_unlock_irq(&recv_cq->lock); } } int mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct ib_qp_cap *cap, int qpn, u32 port, struct mthca_qp *qp, struct ib_udata *udata) { u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; int err; qp->transport = MLX; err = mthca_set_qp_size(dev, cap, pd, qp); if (err) return err; qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE; qp->sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, &qp->sqp->header_dma, GFP_KERNEL); if (!qp->sqp->header_buf) return -ENOMEM; spin_lock_irq(&dev->qp_table.lock); if (mthca_array_get(&dev->qp_table.qp, mqpn)) err = -EBUSY; else mthca_array_set(&dev->qp_table.qp, mqpn, qp); spin_unlock_irq(&dev->qp_table.lock); if (err) goto err_out; qp->port = port; qp->qpn = mqpn; qp->transport = MLX; err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, send_policy, qp, udata); if (err) goto err_out_free; atomic_inc(&pd->sqp_count); return 0; err_out_free: /* * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. */ mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, mqpn); spin_unlock(&dev->qp_table.lock); mthca_unlock_cqs(send_cq, recv_cq); err_out: dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, qp->sqp->header_buf, qp->sqp->header_dma); return err; } static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) { int c; spin_lock_irq(&dev->qp_table.lock); c = qp->refcount; spin_unlock_irq(&dev->qp_table.lock); return c; } void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) { struct mthca_cq *send_cq; struct mthca_cq *recv_cq; send_cq = to_mcq(qp->ibqp.send_cq); recv_cq = to_mcq(qp->ibqp.recv_cq); /* * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. 
*/ mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, qp->qpn & (dev->limits.num_qps - 1)); --qp->refcount; spin_unlock(&dev->qp_table.lock); mthca_unlock_cqs(send_cq, recv_cq); wait_event(qp->wait, !get_qp_refcount(dev, qp)); if (qp->state != IB_QPS_RESET) mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, NULL, 0); /* * If this is a userspace QP, the buffers, MR, CQs and so on * will be cleaned up in userspace, so all we have to do is * unref the mem-free tables and free the QPN in our table. */ if (!qp->ibqp.uobject) { mthca_cq_clean(dev, recv_cq, qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (send_cq != recv_cq) mthca_cq_clean(dev, send_cq, qp->qpn, NULL); mthca_free_memfree(dev, qp); mthca_free_wqe_buf(dev, qp); } mthca_unmap_memfree(dev, qp); if (is_sqp(dev, qp)) { atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, qp->sqp->header_buf, qp->sqp->header_dma); } else mthca_free(&dev->qp_table.alloc, qp->qpn); } /* Create UD header for an MLX send and build a data segment for it */ static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind, const struct ib_ud_wr *wr, struct mthca_mlx_seg *mlx, struct mthca_data_seg *data) { struct mthca_sqp *sqp = qp->sqp; int header_size; int err; u16 pkey; ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0, mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0, &sqp->ud_header); err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header); if (err) return err; mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | (sqp->ud_header.lrh.service_level << 8)); mlx->rlid = sqp->ud_header.lrh.destination_lid; mlx->vcrc = 0; switch (wr->wr.opcode) { case IB_WR_SEND: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; sqp->ud_header.immediate_present = 0; break; case IB_WR_SEND_WITH_IMM: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; sqp->ud_header.immediate_present = 1; sqp->ud_header.immediate_data = wr->wr.ex.imm_data; break; default: return -EINVAL; } sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0; if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); if (!qp->ibqp.qp_num) ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index, &pkey); else ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index, &pkey); sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? 
sqp->qkey : wr->remote_qkey); sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf + ind * MTHCA_UD_HEADER_SIZE); data->byte_count = cpu_to_be32(header_size); data->lkey = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey); data->addr = cpu_to_be64(sqp->header_dma + ind * MTHCA_UD_HEADER_SIZE); return 0; } static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, struct ib_cq *ib_cq) { unsigned cur; struct mthca_cq *cq; cur = wq->head - wq->tail; if (likely(cur + nreq < wq->max)) return 0; cq = to_mcq(ib_cq); spin_lock(&cq->lock); cur = wq->head - wq->tail; spin_unlock(&cq->lock); return cur + nreq >= wq->max; } static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg, u64 remote_addr, u32 rkey) { rseg->raddr = cpu_to_be64(remote_addr); rseg->rkey = cpu_to_be32(rkey); rseg->reserved = 0; } static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, const struct ib_atomic_wr *wr) { if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { aseg->swap_add = cpu_to_be64(wr->swap); aseg->compare = cpu_to_be64(wr->compare_add); } else { aseg->swap_add = cpu_to_be64(wr->compare_add); aseg->compare = 0; } } static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, const struct ib_ud_wr *wr) { useg->lkey = cpu_to_be32(to_mah(wr->ah)->key); useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma); useg->dqpn = cpu_to_be32(wr->remote_qpn); useg->qkey = cpu_to_be32(wr->remote_qkey); } static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, const struct ib_ud_wr *wr) { memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE); useg->dqpn = cpu_to_be32(wr->remote_qpn); useg->qkey = cpu_to_be32(wr->remote_qkey); } int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); void *wqe; void *prev_wqe; unsigned long flags; int err = 0; int nreq; int i; int size; /* * f0 and size0 are only used if nreq != 0, and they will * always be initialized the first time through the main loop * before nreq is incremented. So nreq cannot become non-zero * without initializing f0 and size0, and they are in fact * never used uninitialized. */ int size0; u32 f0; int ind; u8 op0 = 0; spin_lock_irqsave(&qp->sq.lock, flags); /* XXX check that state is OK to post send */ ind = qp->sq.next_ind; for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_err(dev, "SQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->sq.head, qp->sq.tail, qp->sq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_send_wqe(qp, ind); prev_wqe = qp->sq.last; qp->sq.last = wqe; ((struct mthca_next_seg *) wqe)->nda_op = 0; ((struct mthca_next_seg *) wqe)->ee_nds = 0; ((struct mthca_next_seg *) wqe)->flags = ((wr->send_flags & IB_SEND_SIGNALED) ? cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | ((wr->send_flags & IB_SEND_SOLICITED) ? 
cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | cpu_to_be32(1); if (wr->opcode == IB_WR_SEND_WITH_IMM || wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; wqe += sizeof (struct mthca_next_seg); size = sizeof (struct mthca_next_seg) / 16; switch (qp->transport) { case RC: switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, atomic_wr(wr)->rkey); wqe += sizeof (struct mthca_raddr_seg); set_atomic_seg(wqe, atomic_wr(wr)); wqe += sizeof (struct mthca_atomic_seg); size += (sizeof (struct mthca_raddr_seg) + sizeof (struct mthca_atomic_seg)) / 16; break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: case IB_WR_RDMA_READ: set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UC: switch (wr->opcode) { case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UD: set_tavor_ud_seg(wqe, ud_wr(wr)); wqe += sizeof (struct mthca_tavor_ud_seg); size += sizeof (struct mthca_tavor_ud_seg) / 16; break; case MLX: err = build_mlx_header( dev, qp, ind, ud_wr(wr), wqe - sizeof(struct mthca_next_seg), wqe); if (err) { *bad_wr = wr; goto out; } wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; break; } if (wr->num_sge > qp->sq.max_gs) { mthca_err(dev, "too many gathers\n"); err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } /* Add one more inline data segment for ICRC */ if (qp->transport == MLX) { ((struct mthca_data_seg *) wqe)->byte_count = cpu_to_be32((1 << 31) | 4); ((u32 *) wqe)[1] = 0; wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } qp->wrid[ind + qp->rq.max] = wr->wr_id; if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { mthca_err(dev, "opcode invalid\n"); err = -EINVAL; *bad_wr = wr; goto out; } ((struct mthca_next_seg *) prev_wqe)->nda_op = cpu_to_be32(((ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | mthca_opcode[wr->opcode]); wmb(); ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | ((wr->send_flags & IB_SEND_FENCE) ? MTHCA_NEXT_FENCE : 0)); if (!nreq) { size0 = size; op0 = mthca_opcode[wr->opcode]; f0 = wr->send_flags & IB_SEND_FENCE ? 
MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; if (unlikely(ind >= qp->sq.max)) ind -= qp->sq.max; } out: if (likely(nreq)) { wmb(); mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | f0 | op0, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } qp->sq.next_ind = ind; qp->sq.head += nreq; spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); unsigned long flags; int err = 0; int nreq; int i; int size; /* * size0 is only used if nreq != 0, and it will always be * initialized the first time through the main loop before * nreq is incremented. So nreq cannot become non-zero * without initializing size0, and it is in fact never used * uninitialized. */ int size0; int ind; void *wqe; void *prev_wqe; spin_lock_irqsave(&qp->rq.lock, flags); /* XXX check that state is OK to post receive */ ind = qp->rq.next_ind; for (nreq = 0; wr; wr = wr->next) { if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_err(dev, "RQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->rq.head, qp->rq.tail, qp->rq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_recv_wqe(qp, ind); prev_wqe = qp->rq.last; qp->rq.last = wqe; ((struct mthca_next_seg *) wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD); ((struct mthca_next_seg *) wqe)->flags = 0; wqe += sizeof (struct mthca_next_seg); size = sizeof (struct mthca_next_seg) / 16; if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } qp->wrid[ind] = wr->wr_id; ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD | size); if (!nreq) size0 = size; ++ind; if (unlikely(ind >= qp->rq.max)) ind -= qp->rq.max; ++nreq; if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { nreq = 0; wmb(); mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); qp->rq.next_ind = ind; qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; } } out: if (likely(nreq)) { wmb(); mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } qp->rq.next_ind = ind; qp->rq.head += nreq; spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); u32 dbhi; void *wqe; void *prev_wqe; unsigned long flags; int err = 0; int nreq; int i; int size; /* * f0 and size0 are only used if nreq != 0, and they will * always be initialized the first time through the main loop * before nreq is incremented. So nreq cannot become non-zero * without initializing f0 and size0, and they are in fact * never used uninitialized. 
*/ int size0; u32 f0; int ind; u8 op0 = 0; spin_lock_irqsave(&qp->sq.lock, flags); /* XXX check that state is OK to post send */ ind = qp->sq.head & (qp->sq.max - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { nreq = 0; dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); /* * Make sure doorbell record is written before we * write MMIO send doorbell. */ wmb(); mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_err(dev, "SQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->sq.head, qp->sq.tail, qp->sq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_send_wqe(qp, ind); prev_wqe = qp->sq.last; qp->sq.last = wqe; ((struct mthca_next_seg *) wqe)->flags = ((wr->send_flags & IB_SEND_SIGNALED) ? cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | ((wr->send_flags & IB_SEND_SOLICITED) ? cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | ((wr->send_flags & IB_SEND_IP_CSUM) ? cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) | cpu_to_be32(1); if (wr->opcode == IB_WR_SEND_WITH_IMM || wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; wqe += sizeof (struct mthca_next_seg); size = sizeof (struct mthca_next_seg) / 16; switch (qp->transport) { case RC: switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, atomic_wr(wr)->rkey); wqe += sizeof (struct mthca_raddr_seg); set_atomic_seg(wqe, atomic_wr(wr)); wqe += sizeof (struct mthca_atomic_seg); size += (sizeof (struct mthca_raddr_seg) + sizeof (struct mthca_atomic_seg)) / 16; break; case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UC: switch (wr->opcode) { case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UD: set_arbel_ud_seg(wqe, ud_wr(wr)); wqe += sizeof (struct mthca_arbel_ud_seg); size += sizeof (struct mthca_arbel_ud_seg) / 16; break; case MLX: err = build_mlx_header( dev, qp, ind, ud_wr(wr), wqe - sizeof(struct mthca_next_seg), wqe); if (err) { *bad_wr = wr; goto out; } wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; break; } if (wr->num_sge > qp->sq.max_gs) { mthca_err(dev, "too many gathers\n"); err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } /* Add one more inline data segment for ICRC */ if (qp->transport == MLX) { ((struct mthca_data_seg *) wqe)->byte_count = cpu_to_be32((1 << 31) | 4); ((u32 *) wqe)[1] = 0; wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } qp->wrid[ind + qp->rq.max] = 
wr->wr_id; if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { mthca_err(dev, "opcode invalid\n"); err = -EINVAL; *bad_wr = wr; goto out; } ((struct mthca_next_seg *) prev_wqe)->nda_op = cpu_to_be32(((ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | mthca_opcode[wr->opcode]); wmb(); ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD | size | ((wr->send_flags & IB_SEND_FENCE) ? MTHCA_NEXT_FENCE : 0)); if (!nreq) { size0 = size; op0 = mthca_opcode[wr->opcode]; f0 = wr->send_flags & IB_SEND_FENCE ? MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; if (unlikely(ind >= qp->sq.max)) ind -= qp->sq.max; } out: if (likely(nreq)) { dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; qp->sq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); /* * Make sure doorbell record is written before we * write MMIO send doorbell. */ wmb(); mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); unsigned long flags; int err = 0; int nreq; int ind; int i; void *wqe; spin_lock_irqsave(&qp->rq.lock, flags); /* XXX check that state is OK to post receive */ ind = qp->rq.head & (qp->rq.max - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_err(dev, "RQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->rq.head, qp->rq.tail, qp->rq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_recv_wqe(qp, ind); ((struct mthca_next_seg *) wqe)->flags = 0; wqe += sizeof (struct mthca_next_seg); if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); } if (i < qp->rq.max_gs) mthca_set_data_seg_inval(wqe); qp->wrid[ind] = wr->wr_id; ++ind; if (unlikely(ind >= qp->rq.max)) ind -= qp->rq.max; } out: if (likely(nreq)) { qp->rq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); } spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, int index, int *dbd, __be32 *new_wqe) { struct mthca_next_seg *next; /* * For SRQs, all receive WQEs generate a CQE, so we're always * at the end of the doorbell chain. */ if (qp->ibqp.srq && !is_send) { *new_wqe = 0; return; } if (is_send) next = get_send_wqe(qp, index); else next = get_recv_wqe(qp, index); *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); if (next->ee_nds & cpu_to_be32(0x3f)) *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | (next->ee_nds & cpu_to_be32(0x3f)); else *new_wqe = 0; } int mthca_init_qp_table(struct mthca_dev *dev) { int err; int i; spin_lock_init(&dev->qp_table.lock); /* * We reserve 2 extra QPs per port for the special QPs. The * special QP for port 1 has to be even, so round up. 
 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;

	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2);
		if (err) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "%d, aborting.\n", err);
			goto err_out;
		}
	}
	return 0;

err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}

void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}
linux-master
drivers/infiniband/hw/mthca/mthca_qp.c
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <linux/slab.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include "mthca_dev.h" enum { MTHCA_RATE_TAVOR_FULL = 0, MTHCA_RATE_TAVOR_1X = 1, MTHCA_RATE_TAVOR_4X = 2, MTHCA_RATE_TAVOR_1X_DDR = 3 }; enum { MTHCA_RATE_MEMFREE_FULL = 0, MTHCA_RATE_MEMFREE_QUARTER = 1, MTHCA_RATE_MEMFREE_EIGHTH = 2, MTHCA_RATE_MEMFREE_HALF = 3 }; struct mthca_av { __be32 port_pd; u8 reserved1; u8 g_slid; __be16 dlid; u8 reserved2; u8 gid_index; u8 msg_sr; u8 hop_limit; __be32 sl_tclass_flowlabel; __be32 dgid[4]; }; static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate) { switch (mthca_rate) { case MTHCA_RATE_MEMFREE_EIGHTH: return mult_to_ib_rate(port_rate >> 3); case MTHCA_RATE_MEMFREE_QUARTER: return mult_to_ib_rate(port_rate >> 2); case MTHCA_RATE_MEMFREE_HALF: return mult_to_ib_rate(port_rate >> 1); case MTHCA_RATE_MEMFREE_FULL: default: return mult_to_ib_rate(port_rate); } } static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate) { switch (mthca_rate) { case MTHCA_RATE_TAVOR_1X: return IB_RATE_2_5_GBPS; case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS; case MTHCA_RATE_TAVOR_4X: return IB_RATE_10_GBPS; default: return mult_to_ib_rate(port_rate); } } enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port) { if (mthca_is_memfree(dev)) { /* Handle old Arbel FW */ if (dev->limits.stat_rate_support == 0x3 && mthca_rate) return IB_RATE_2_5_GBPS; return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]); } else return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]); } static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate) { if (cur_rate <= req_rate) return 0; /* * Inter-packet delay (IPD) to get from rate X down to a rate * no more than Y is (X - 1) / Y. 
*/ switch ((cur_rate - 1) / req_rate) { case 0: return MTHCA_RATE_MEMFREE_FULL; case 1: return MTHCA_RATE_MEMFREE_HALF; case 2: case 3: return MTHCA_RATE_MEMFREE_QUARTER; default: return MTHCA_RATE_MEMFREE_EIGHTH; } } static u8 ib_rate_to_tavor(u8 static_rate) { switch (static_rate) { case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X; case IB_RATE_5_GBPS: return MTHCA_RATE_TAVOR_1X_DDR; case IB_RATE_10_GBPS: return MTHCA_RATE_TAVOR_4X; default: return MTHCA_RATE_TAVOR_FULL; } } u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port) { u8 rate; if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1]) return 0; if (mthca_is_memfree(dev)) rate = ib_rate_to_memfree(ib_rate_to_mult(static_rate), dev->rate[port - 1]); else rate = ib_rate_to_tavor(static_rate); if (!(dev->limits.stat_rate_support & (1 << rate))) rate = 1; return rate; } int mthca_create_ah(struct mthca_dev *dev, struct mthca_pd *pd, struct rdma_ah_attr *ah_attr, struct mthca_ah *ah) { u32 index = -1; struct mthca_av *av = NULL; ah->type = MTHCA_AH_PCI_POOL; if (mthca_is_memfree(dev)) { ah->av = kmalloc(sizeof *ah->av, GFP_ATOMIC); if (!ah->av) return -ENOMEM; ah->type = MTHCA_AH_KMALLOC; av = ah->av; } else if (!atomic_read(&pd->sqp_count) && !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { index = mthca_alloc(&dev->av_table.alloc); /* fall back to allocate in host memory */ if (index == -1) goto on_hca_fail; av = kmalloc(sizeof *av, GFP_ATOMIC); if (!av) goto on_hca_fail; ah->type = MTHCA_AH_ON_HCA; ah->avdma = dev->av_table.ddr_av_base + index * MTHCA_AV_SIZE; } on_hca_fail: if (ah->type == MTHCA_AH_PCI_POOL) { ah->av = dma_pool_zalloc(dev->av_table.pool, GFP_ATOMIC, &ah->avdma); if (!ah->av) return -ENOMEM; av = ah->av; } ah->key = pd->ntmr.ibmr.lkey; av->port_pd = cpu_to_be32(pd->pd_num | (rdma_ah_get_port_num(ah_attr) << 24)); av->g_slid = rdma_ah_get_path_bits(ah_attr); av->dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr)); av->msg_sr = (3 << 4) | /* 2K message */ mthca_get_rate(dev, rdma_ah_get_static_rate(ah_attr), rdma_ah_get_port_num(ah_attr)); av->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28); if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); av->g_slid |= 0x80; av->gid_index = (rdma_ah_get_port_num(ah_attr) - 1) * dev->limits.gid_table_len + grh->sgid_index; av->hop_limit = grh->hop_limit; av->sl_tclass_flowlabel |= cpu_to_be32((grh->traffic_class << 20) | grh->flow_label); memcpy(av->dgid, grh->dgid.raw, 16); } else { /* Arbel workaround -- low byte of GID must be 2 */ av->dgid[3] = cpu_to_be32(2); } if (0) { int j; mthca_dbg(dev, "Created UDAV at %p/%08lx:\n", av, (unsigned long) ah->avdma); for (j = 0; j < 8; ++j) printk(KERN_DEBUG " [%2x] %08x\n", j * 4, be32_to_cpu(((__be32 *) av)[j])); } if (ah->type == MTHCA_AH_ON_HCA) { memcpy_toio(dev->av_table.av_map + index * MTHCA_AV_SIZE, av, MTHCA_AV_SIZE); kfree(av); } return 0; } int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah) { switch (ah->type) { case MTHCA_AH_ON_HCA: mthca_free(&dev->av_table.alloc, (ah->avdma - dev->av_table.ddr_av_base) / MTHCA_AV_SIZE); break; case MTHCA_AH_PCI_POOL: dma_pool_free(dev->av_table.pool, ah->av, ah->avdma); break; case MTHCA_AH_KMALLOC: kfree(ah->av); break; } return 0; } int mthca_ah_grh_present(struct mthca_ah *ah) { return !!(ah->av->g_slid & 0x80); } int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah, struct ib_ud_header *header) { if (ah->type == MTHCA_AH_ON_HCA) return -EINVAL; 
	header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
	header->lrh.destination_lid = ah->av->dlid;
	header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
	if (mthca_ah_grh_present(ah)) {
		header->grh.traffic_class =
			(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
		header->grh.flow_label =
			ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		header->grh.hop_limit = ah->av->hop_limit;
		header->grh.source_gid = ah->ibah.sgid_attr->gid;
		memcpy(header->grh.destination_gid.raw, ah->av->dgid, 16);
	}

	return 0;
}

int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct mthca_ah *ah = to_mah(ibah);
	struct mthca_dev *dev = to_mdev(ibah->device);
	u32 port_num = be32_to_cpu(ah->av->port_pd) >> 24;

	/* Only implement for MAD and memfree ah for now. */
	if (ah->type == MTHCA_AH_ON_HCA)
		return -ENOSYS;

	memset(attr, 0, sizeof *attr);
	attr->type = ibah->type;
	rdma_ah_set_dlid(attr, be16_to_cpu(ah->av->dlid));
	rdma_ah_set_sl(attr, be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28);
	rdma_ah_set_port_num(attr, port_num);
	rdma_ah_set_static_rate(attr,
				mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
						 port_num));
	rdma_ah_set_path_bits(attr, ah->av->g_slid & 0x7F);
	if (mthca_ah_grh_present(ah)) {
		u32 tc_fl = be32_to_cpu(ah->av->sl_tclass_flowlabel);

		rdma_ah_set_grh(attr, NULL,
				tc_fl & 0xfffff,
				ah->av->gid_index &
				(dev->limits.gid_table_len - 1),
				ah->av->hop_limit,
				(tc_fl >> 20) & 0xff);
		rdma_ah_set_dgid_raw(attr, ah->av->dgid);
	}

	return 0;
}

int mthca_init_av_table(struct mthca_dev *dev)
{
	int err;

	if (mthca_is_memfree(dev))
		return 0;

	err = mthca_alloc_init(&dev->av_table.alloc,
			       dev->av_table.num_ddr_avs,
			       dev->av_table.num_ddr_avs - 1,
			       0);
	if (err)
		return err;

	dev->av_table.pool = dma_pool_create("mthca_av", &dev->pdev->dev,
					     MTHCA_AV_SIZE,
					     MTHCA_AV_SIZE, 0);
	if (!dev->av_table.pool)
		goto out_free_alloc;

	if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		dev->av_table.av_map = ioremap(pci_resource_start(dev->pdev, 4) +
					       dev->av_table.ddr_av_base -
					       dev->ddr_start,
					       dev->av_table.num_ddr_avs *
					       MTHCA_AV_SIZE);
		if (!dev->av_table.av_map)
			goto out_free_pool;
	} else
		dev->av_table.av_map = NULL;

	return 0;

out_free_pool:
	dma_pool_destroy(dev->av_table.pool);

out_free_alloc:
	mthca_alloc_cleanup(&dev->av_table.alloc);
	return -ENOMEM;
}

void mthca_cleanup_av_table(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return;

	if (dev->av_table.av_map)
		iounmap(dev->av_table.av_map);
	dma_pool_destroy(dev->av_table.pool);
	mthca_alloc_cleanup(&dev->av_table.alloc);
}
linux-master
drivers/infiniband/hw/mthca/mthca_av.c
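The rate helpers in mthca_av.c reduce to a small ratio computation: ib_rate_to_memfree() divides how far the port's current rate exceeds the requested static rate and maps the quotient to a full/half/quarter/eighth code. A minimal userspace sketch of that mapping follows; memfree_rate() and the RATE_* constants are illustrative names, not part of the driver.

/* Sketch only (not kernel code): the exceed ratio (cur_rate - 1) / req_rate
 * selects full, half, quarter or eighth rate, mirroring ib_rate_to_memfree(). */
#include <stdio.h>

enum { RATE_FULL, RATE_HALF, RATE_QUARTER, RATE_EIGHTH };

static int memfree_rate(int req_rate, int cur_rate)
{
	switch ((cur_rate - 1) / req_rate) {
	case 0:	 return RATE_FULL;
	case 1:	 return RATE_HALF;
	case 2:
	case 3:	 return RATE_QUARTER;
	default: return RATE_EIGHTH;
	}
}

int main(void)
{
	/* e.g. port at 4X DDR (8x the 2.5 Gb/s base), request 1X (1x) */
	printf("rate code: %d\n", memfree_rate(1, 8));	/* prints 3 (eighth) */
	return 0;
}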
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/errno.h> #include <linux/slab.h> #include <linux/bitmap.h> #include "mthca_dev.h" /* Trivial bitmap-based allocator */ u32 mthca_alloc(struct mthca_alloc *alloc) { unsigned long flags; u32 obj; spin_lock_irqsave(&alloc->lock, flags); obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); if (obj >= alloc->max) { alloc->top = (alloc->top + alloc->max) & alloc->mask; obj = find_first_zero_bit(alloc->table, alloc->max); } if (obj < alloc->max) { __set_bit(obj, alloc->table); obj |= alloc->top; } else obj = -1; spin_unlock_irqrestore(&alloc->lock, flags); return obj; } void mthca_free(struct mthca_alloc *alloc, u32 obj) { unsigned long flags; obj &= alloc->max - 1; spin_lock_irqsave(&alloc->lock, flags); __clear_bit(obj, alloc->table); alloc->last = min(alloc->last, obj); alloc->top = (alloc->top + alloc->max) & alloc->mask; spin_unlock_irqrestore(&alloc->lock, flags); } int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, u32 reserved) { /* num must be a power of 2 */ if (num != 1 << (ffs(num) - 1)) return -EINVAL; alloc->last = 0; alloc->top = 0; alloc->max = num; alloc->mask = mask; spin_lock_init(&alloc->lock); alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; bitmap_set(alloc->table, 0, reserved); return 0; } void mthca_alloc_cleanup(struct mthca_alloc *alloc) { bitmap_free(alloc->table); } /* * Array of pointers with lazy allocation of leaf pages. Callers of * _get, _set and _clear methods must use a lock or otherwise * serialize access to the array. */ #define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1) void *mthca_array_get(struct mthca_array *array, int index) { int p = (index * sizeof (void *)) >> PAGE_SHIFT; if (array->page_list[p].page) return array->page_list[p].page[index & MTHCA_ARRAY_MASK]; else return NULL; } int mthca_array_set(struct mthca_array *array, int index, void *value) { int p = (index * sizeof (void *)) >> PAGE_SHIFT; /* Allocate with GFP_ATOMIC because we'll be called with locks held. 
*/ if (!array->page_list[p].page) array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC); if (!array->page_list[p].page) return -ENOMEM; array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value; ++array->page_list[p].used; return 0; } void mthca_array_clear(struct mthca_array *array, int index) { int p = (index * sizeof (void *)) >> PAGE_SHIFT; if (--array->page_list[p].used == 0) { free_page((unsigned long) array->page_list[p].page); array->page_list[p].page = NULL; } else array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL; if (array->page_list[p].used < 0) pr_debug("Array %p index %d page %d with ref count %d < 0\n", array, index, p, array->page_list[p].used); } int mthca_array_init(struct mthca_array *array, int nent) { int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; int i; array->page_list = kmalloc_array(npage, sizeof(*array->page_list), GFP_KERNEL); if (!array->page_list) return -ENOMEM; for (i = 0; i < npage; ++i) { array->page_list[i].page = NULL; array->page_list[i].used = 0; } return 0; } void mthca_array_cleanup(struct mthca_array *array, int nent) { int i; for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i) free_page((unsigned long) array->page_list[i].page); kfree(array->page_list); } /* * Handling for queue buffers -- we allocate a bunch of memory and * register it in a memory region at HCA virtual address 0. If the * requested size is > max_direct, we split the allocation into * multiple pages, so we don't require too much contiguous memory. */ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, int hca_write, struct mthca_mr *mr) { int err = -ENOMEM; int npages, shift; u64 *dma_list = NULL; dma_addr_t t; int i; if (size <= max_direct) { *is_direct = 1; npages = 1; shift = get_order(size) + PAGE_SHIFT; buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, size, &t, GFP_KERNEL); if (!buf->direct.buf) return -ENOMEM; dma_unmap_addr_set(&buf->direct, mapping, t); while (t & ((1 << shift) - 1)) { --shift; npages *= 2; } dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL); if (!dma_list) goto err_free; for (i = 0; i < npages; ++i) dma_list[i] = t + i * (1 << shift); } else { *is_direct = 0; npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; shift = PAGE_SHIFT; dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL); if (!dma_list) return -ENOMEM; buf->page_list = kmalloc_array(npages, sizeof(*buf->page_list), GFP_KERNEL); if (!buf->page_list) goto err_out; for (i = 0; i < npages; ++i) buf->page_list[i].buf = NULL; for (i = 0; i < npages; ++i) { buf->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, &t, GFP_KERNEL); if (!buf->page_list[i].buf) goto err_free; dma_list[i] = t; dma_unmap_addr_set(&buf->page_list[i], mapping, t); clear_page(buf->page_list[i].buf); } } err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift, npages, 0, size, MTHCA_MPT_FLAG_LOCAL_READ | (hca_write ? 
MTHCA_MPT_FLAG_LOCAL_WRITE : 0), mr); if (err) goto err_free; kfree(dma_list); return 0; err_free: mthca_buf_free(dev, size, buf, *is_direct, NULL); err_out: kfree(dma_list); return err; } void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, int is_direct, struct mthca_mr *mr) { int i; if (mr) mthca_free_mr(dev, mr); if (is_direct) dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, dma_unmap_addr(&buf->direct, mapping)); else { for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->page_list[i].buf, dma_unmap_addr(&buf->page_list[i], mapping)); kfree(buf->page_list); } }
linux-master
drivers/infiniband/hw/mthca/mthca_allocator.c
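mthca_alloc()/mthca_free() in mthca_allocator.c implement the "trivial bitmap-based allocator": search for a clear bit starting from the last allocation point, wrap once, and remember freed slots so they can be reused. A minimal userspace sketch of that idea follows, with the spinlock and the top/mask rotation omitted; bm_alloc() and bm_free() are purely illustrative names.

/* Sketch only (not kernel code): a byte array stands in for the bitmap. */
#include <stdio.h>
#include <string.h>

#define NOBJ 32

struct bitmap_alloc {
	unsigned char used[NOBJ];
	unsigned int last;		/* where the next search starts */
};

static int bm_alloc(struct bitmap_alloc *a)
{
	unsigned int i;

	for (i = 0; i < NOBJ; ++i) {
		unsigned int obj = (a->last + i) % NOBJ;

		if (!a->used[obj]) {
			a->used[obj] = 1;
			a->last = obj;
			return obj;
		}
	}
	return -1;			/* exhausted */
}

static void bm_free(struct bitmap_alloc *a, int obj)
{
	a->used[obj] = 0;
	if ((unsigned int)obj < a->last)
		a->last = obj;		/* reuse low slots first */
}

int main(void)
{
	struct bitmap_alloc a;

	memset(&a, 0, sizeof(a));
	printf("%d %d\n", bm_alloc(&a), bm_alloc(&a));	/* 0 1 */
	bm_free(&a, 0);
	printf("%d\n", bm_alloc(&a));			/* 0 again */
	return 0;
}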
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <linux/gfp.h> #include "mthca_dev.h" #include "mthca_cmd.h" struct mthca_mgm { __be32 next_gid_index; u32 reserved[3]; u8 gid[16]; __be32 qp[MTHCA_QP_PER_MGM]; }; static const u8 zero_gid[16]; /* automatically initialized to 0 */ /* * Caller must hold MCG table semaphore. gid and mgm parameters must * be properly aligned for command interface. * * Returns 0 unless a firmware command error occurs. * * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 * and *mgm holds MGM entry. * * if GID is found in AMGM, *index = index in AMGM, *prev = index of * previous entry in hash chain and *mgm holds AMGM entry. * * If no AMGM exists for given gid, *index = -1, *prev = index of last * entry in hash chain and *mgm holds end of hash chain. 
*/ static int find_mgm(struct mthca_dev *dev, u8 *gid, struct mthca_mailbox *mgm_mailbox, u16 *hash, int *prev, int *index) { struct mthca_mailbox *mailbox; struct mthca_mgm *mgm = mgm_mailbox->buf; u8 *mgid; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return -ENOMEM; mgid = mailbox->buf; memcpy(mgid, gid, 16); err = mthca_MGID_HASH(dev, mailbox, hash); if (err) { mthca_err(dev, "MGID_HASH failed (%d)\n", err); goto out; } if (0) mthca_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash); *index = *hash; *prev = -1; do { err = mthca_READ_MGM(dev, *index, mgm_mailbox); if (err) { mthca_err(dev, "READ_MGM failed (%d)\n", err); goto out; } if (!memcmp(mgm->gid, zero_gid, 16)) { if (*index != *hash) { mthca_err(dev, "Found zero MGID in AMGM.\n"); err = -EINVAL; } goto out; } if (!memcmp(mgm->gid, gid, 16)) goto out; *prev = *index; *index = be32_to_cpu(mgm->next_gid_index) >> 6; } while (*index); *index = -1; out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int index, prev; int link = 0; int i; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&dev->mcg_table.mutex); err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) goto out; if (index != -1) { if (!memcmp(mgm->gid, zero_gid, 16)) memcpy(mgm->gid, gid->raw, 16); } else { link = 1; index = mthca_alloc(&dev->mcg_table.alloc); if (index == -1) { mthca_err(dev, "No AMGM entries left\n"); err = -ENOMEM; goto out; } err = mthca_READ_MGM(dev, index, mailbox); if (err) { mthca_err(dev, "READ_MGM failed (%d)\n", err); goto out; } memset(mgm, 0, sizeof *mgm); memcpy(mgm->gid, gid->raw, 16); } for (i = 0; i < MTHCA_QP_PER_MGM; ++i) if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) { mthca_dbg(dev, "QP %06x already a member of MGM\n", ibqp->qp_num); err = 0; goto out; } else if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) { mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31)); break; } if (i == MTHCA_QP_PER_MGM) { mthca_err(dev, "MGM at index %x is full.\n", index); err = -ENOMEM; goto out; } err = mthca_WRITE_MGM(dev, index, mailbox); if (err) { mthca_err(dev, "WRITE_MGM failed %d\n", err); err = -EINVAL; goto out; } if (!link) goto out; err = mthca_READ_MGM(dev, prev, mailbox); if (err) { mthca_err(dev, "READ_MGM failed %d\n", err); goto out; } mgm->next_gid_index = cpu_to_be32(index << 6); err = mthca_WRITE_MGM(dev, prev, mailbox); if (err) mthca_err(dev, "WRITE_MGM returned %d\n", err); out: if (err && link && index != -1) { BUG_ON(index < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, index); } mutex_unlock(&dev->mcg_table.mutex); mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int prev, index; int i, loc; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&dev->mcg_table.mutex); err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) goto out; if (index == -1) { mthca_err(dev, "MGID %pI6 not found\n", gid->raw); err = -EINVAL; goto out; } for (loc = -1, i = 0; i < MTHCA_QP_PER_MGM; ++i) { if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 
31))) loc = i; if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) break; } if (loc == -1) { mthca_err(dev, "QP %06x not found in MGM\n", ibqp->qp_num); err = -EINVAL; goto out; } mgm->qp[loc] = mgm->qp[i - 1]; mgm->qp[i - 1] = 0; err = mthca_WRITE_MGM(dev, index, mailbox); if (err) { mthca_err(dev, "WRITE_MGM returned %d\n", err); goto out; } if (i != 1) goto out; if (prev == -1) { /* Remove entry from MGM */ int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6; if (amgm_index_to_free) { err = mthca_READ_MGM(dev, amgm_index_to_free, mailbox); if (err) { mthca_err(dev, "READ_MGM returned %d\n", err); goto out; } } else memset(mgm->gid, 0, 16); err = mthca_WRITE_MGM(dev, index, mailbox); if (err) { mthca_err(dev, "WRITE_MGM returned %d\n", err); goto out; } if (amgm_index_to_free) { BUG_ON(amgm_index_to_free < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, amgm_index_to_free); } } else { /* Remove entry from AMGM */ int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; err = mthca_READ_MGM(dev, prev, mailbox); if (err) { mthca_err(dev, "READ_MGM returned %d\n", err); goto out; } mgm->next_gid_index = cpu_to_be32(curr_next_index << 6); err = mthca_WRITE_MGM(dev, prev, mailbox); if (err) { mthca_err(dev, "WRITE_MGM returned %d\n", err); goto out; } BUG_ON(index < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, index); } out: mutex_unlock(&dev->mcg_table.mutex); mthca_free_mailbox(dev, mailbox); return err; } int mthca_init_mcg_table(struct mthca_dev *dev) { int err; int table_size = dev->limits.num_mgms + dev->limits.num_amgms; err = mthca_alloc_init(&dev->mcg_table.alloc, table_size, table_size - 1, dev->limits.num_mgms); if (err) return err; mutex_init(&dev->mcg_table.mutex); return 0; } void mthca_cleanup_mcg_table(struct mthca_dev *dev) { mthca_alloc_cleanup(&dev->mcg_table.alloc); }
linux-master
drivers/infiniband/hw/mthca/mthca_mcg.c
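find_mgm() in mthca_mcg.c resolves a multicast GID by hashing it into the MGM table and then following next_gid_index links through the AMGM overflow chain until it hits a match, an empty entry, or the end of the chain. A minimal userspace sketch of that hash-chain walk follows, using an in-memory table instead of READ_MGM mailbox commands; all names here are illustrative.

/* Sketch only (not kernel code): walk a hash chain of GID entries. */
#include <stdio.h>
#include <string.h>

#define NENTRY 8

struct mgm_entry {
	unsigned char gid[16];
	int next;			/* next chain index, 0 = end */
};

static const unsigned char zero_gid[16];

/* Returns the matching (or empty) entry index, -1 if the chain ends;
 * *prev is the previous entry in the chain, -1 for the hash bucket. */
static int find_gid(struct mgm_entry *tab, int hash,
		    const unsigned char *gid, int *prev)
{
	int index = hash;

	*prev = -1;
	do {
		if (!memcmp(tab[index].gid, zero_gid, 16) ||
		    !memcmp(tab[index].gid, gid, 16))
			return index;
		*prev = index;
		index = tab[index].next;
	} while (index);

	return -1;
}

int main(void)
{
	struct mgm_entry tab[NENTRY] = { 0 };
	unsigned char gid[16] = { 0xff, 0x12 };
	int prev;

	memcpy(tab[5].gid, gid, 16);	/* pretend the GID hashes to bucket 5 */
	printf("found at %d (prev %d)\n", find_gid(tab, 5, gid, &prev), prev);
	return 0;
}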
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/page.h> #include "mthca_memfree.h" #include "mthca_dev.h" #include "mthca_cmd.h" /* * We allocate in as big chunks as we can, up to a maximum of 256 KB * per chunk. */ enum { MTHCA_ICM_ALLOC_SIZE = 1 << 18, MTHCA_TABLE_CHUNK_SIZE = 1 << 18 }; struct mthca_user_db_table { struct mutex mutex; struct { u64 uvirt; struct scatterlist mem; int refcount; } page[]; }; static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) { int i; if (chunk->nsg > 0) dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages, DMA_BIDIRECTIONAL); for (i = 0; i < chunk->npages; ++i) __free_pages(sg_page(&chunk->mem[i]), get_order(chunk->mem[i].length)); } static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) { int i; for (i = 0; i < chunk->npages; ++i) { dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, lowmem_page_address(sg_page(&chunk->mem[i])), sg_dma_address(&chunk->mem[i])); } } void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent) { struct mthca_icm_chunk *chunk, *tmp; if (!icm) return; list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) { if (coherent) mthca_free_icm_coherent(dev, chunk); else mthca_free_icm_pages(dev, chunk); kfree(chunk); } kfree(icm); } static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) { struct page *page; /* * Use __GFP_ZERO because buggy firmware assumes ICM pages are * cleared, and subtle failures are seen if they aren't. 
*/ page = alloc_pages(gfp_mask | __GFP_ZERO, order); if (!page) return -ENOMEM; sg_set_page(mem, page, PAGE_SIZE << order, 0); return 0; } static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, int order, gfp_t gfp_mask) { void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem), gfp_mask); if (!buf) return -ENOMEM; sg_set_buf(mem, buf, PAGE_SIZE << order); BUG_ON(mem->offset); sg_dma_len(mem) = PAGE_SIZE << order; return 0; } struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, gfp_t gfp_mask, int coherent) { struct mthca_icm *icm; struct mthca_icm_chunk *chunk = NULL; int cur_order; int ret; /* We use sg_set_buf for coherent allocs, which assumes low memory */ BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); if (!icm) return icm; icm->refcount = 0; INIT_LIST_HEAD(&icm->chunk_list); cur_order = get_order(MTHCA_ICM_ALLOC_SIZE); while (npages > 0) { if (!chunk) { chunk = kmalloc(sizeof *chunk, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); if (!chunk) goto fail; sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN); chunk->npages = 0; chunk->nsg = 0; list_add_tail(&chunk->list, &icm->chunk_list); } while (1 << cur_order > npages) --cur_order; if (coherent) ret = mthca_alloc_icm_coherent(&dev->pdev->dev, &chunk->mem[chunk->npages], cur_order, gfp_mask); else ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], cur_order, gfp_mask); if (!ret) { ++chunk->npages; if (coherent) ++chunk->nsg; else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) { chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem, chunk->npages, DMA_BIDIRECTIONAL); if (chunk->nsg <= 0) goto fail; } if (chunk->npages == MTHCA_ICM_CHUNK_LEN) chunk = NULL; npages -= 1 << cur_order; } else { --cur_order; if (cur_order < 0) goto fail; } } if (!coherent && chunk) { chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem, chunk->npages, DMA_BIDIRECTIONAL); if (chunk->nsg <= 0) goto fail; } return icm; fail: mthca_free_icm(dev, icm, coherent); return NULL; } int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) { int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; int ret = 0; mutex_lock(&table->mutex); if (table->icm[i]) { ++table->icm[i]->refcount; goto out; } table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, (table->lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN, table->coherent); if (!table->icm[i]) { ret = -ENOMEM; goto out; } if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) { mthca_free_icm(dev, table->icm[i], table->coherent); table->icm[i] = NULL; ret = -ENOMEM; goto out; } ++table->icm[i]->refcount; out: mutex_unlock(&table->mutex); return ret; } void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) { int i; if (!mthca_is_memfree(dev)) return; i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; mutex_lock(&table->mutex); if (--table->icm[i]->refcount == 0) { mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE); mthca_free_icm(dev, table->icm[i], table->coherent); table->icm[i] = NULL; } mutex_unlock(&table->mutex); } void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle) { int idx, offset, dma_offset, i; struct mthca_icm_chunk *chunk; struct mthca_icm *icm; struct page *page = NULL; if (!table->lowmem) return NULL; mutex_lock(&table->mutex); idx = (obj & (table->num_obj - 1)) * table->obj_size; icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE]; dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE; if (!icm) goto out; list_for_each_entry(chunk, &icm->chunk_list, list) { for (i = 0; i < chunk->npages; ++i) { if (dma_handle && dma_offset >= 0) { if (sg_dma_len(&chunk->mem[i]) > dma_offset) *dma_handle = sg_dma_address(&chunk->mem[i]) + dma_offset; dma_offset -= sg_dma_len(&chunk->mem[i]); } /* DMA mapping can merge pages but not split them, * so if we found the page, dma_handle has already * been assigned to. */ if (chunk->mem[i].length > offset) { page = sg_page(&chunk->mem[i]); goto out; } offset -= chunk->mem[i].length; } } out: mutex_unlock(&table->mutex); return page ? lowmem_page_address(page) + offset : NULL; } int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table, int start, int end) { int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size; int i, err; for (i = start; i <= end; i += inc) { err = mthca_table_get(dev, table, i); if (err) goto fail; } return 0; fail: while (i > start) { i -= inc; mthca_table_put(dev, table, i); } return err; } void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, int start, int end) { int i; if (!mthca_is_memfree(dev)) return; for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size) mthca_table_put(dev, table, i); } struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, u64 virt, int obj_size, int nobj, int reserved, int use_lowmem, int use_coherent) { struct mthca_icm_table *table; int obj_per_chunk; int num_icm; unsigned chunk_size; int i; obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size; num_icm = DIV_ROUND_UP(nobj, obj_per_chunk); table = kmalloc(struct_size(table, icm, num_icm), GFP_KERNEL); if (!table) return NULL; table->virt = virt; table->num_icm = num_icm; table->num_obj = nobj; table->obj_size = obj_size; table->lowmem = use_lowmem; table->coherent = use_coherent; mutex_init(&table->mutex); for (i = 0; i < num_icm; ++i) table->icm[i] = NULL; for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { chunk_size = MTHCA_TABLE_CHUNK_SIZE; if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size) chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE; table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT, (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN, use_coherent); if (!table->icm[i]) goto err; if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE)) { mthca_free_icm(dev, table->icm[i], table->coherent); table->icm[i] = NULL; goto err; } /* * Add a reference to this ICM chunk so that it never * gets freed (since it contains reserved firmware objects). */ ++table->icm[i]->refcount; } return table; err: for (i = 0; i < num_icm; ++i) if (table->icm[i]) { mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE); mthca_free_icm(dev, table->icm[i], table->coherent); } kfree(table); return NULL; } void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table) { int i; for (i = 0; i < table->num_icm; ++i) if (table->icm[i]) { mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE); mthca_free_icm(dev, table->icm[i], table->coherent); } kfree(table); } static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page) { return dev->uar_table.uarc_base + uar->index * dev->uar_table.uarc_size + page * MTHCA_ICM_PAGE_SIZE; } int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab, int index, u64 uaddr) { struct page *pages[1]; int ret = 0; int i; if (!mthca_is_memfree(dev)) return 0; if (index < 0 || index > dev->uar_table.uarc_size / 8) return -EINVAL; mutex_lock(&db_tab->mutex); i = index / MTHCA_DB_REC_PER_PAGE; if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) || (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) || (uaddr & 4095)) { ret = -EINVAL; goto out; } if (db_tab->page[i].refcount) { ++db_tab->page[i].refcount; goto out; } ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE | FOLL_LONGTERM, pages); if (ret < 0) goto out; sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE, uaddr & ~PAGE_MASK); ret = dma_map_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1, DMA_TO_DEVICE); if (ret < 0) { unpin_user_page(pages[0]); goto out; } ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem), mthca_uarc_virt(dev, uar, i)); if (ret) { dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1, DMA_TO_DEVICE); unpin_user_page(sg_page(&db_tab->page[i].mem)); goto out; } db_tab->page[i].uvirt = uaddr; db_tab->page[i].refcount = 1; out: mutex_unlock(&db_tab->mutex); return ret; } void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab, int index) { if (!mthca_is_memfree(dev)) return; /* * To make our bookkeeping simpler, we don't unmap DB * pages until we clean up the whole db table. 
*/ mutex_lock(&db_tab->mutex); --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount; mutex_unlock(&db_tab->mutex); } struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev) { struct mthca_user_db_table *db_tab; int npages; int i; if (!mthca_is_memfree(dev)) return NULL; npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL); if (!db_tab) return ERR_PTR(-ENOMEM); mutex_init(&db_tab->mutex); for (i = 0; i < npages; ++i) { db_tab->page[i].refcount = 0; db_tab->page[i].uvirt = 0; sg_init_table(&db_tab->page[i].mem, 1); } return db_tab; } void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab) { int i; if (!mthca_is_memfree(dev)) return; for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) { if (db_tab->page[i].uvirt) { mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1); dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1, DMA_TO_DEVICE); unpin_user_page(sg_page(&db_tab->page[i].mem)); } } kfree(db_tab); } int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type, u32 qn, __be32 **db) { int group; int start, end, dir; int i, j; struct mthca_db_page *page; int ret = 0; mutex_lock(&dev->db_tab->mutex); switch (type) { case MTHCA_DB_TYPE_CQ_ARM: case MTHCA_DB_TYPE_SQ: group = 0; start = 0; end = dev->db_tab->max_group1; dir = 1; break; case MTHCA_DB_TYPE_CQ_SET_CI: case MTHCA_DB_TYPE_RQ: case MTHCA_DB_TYPE_SRQ: group = 1; start = dev->db_tab->npages - 1; end = dev->db_tab->min_group2; dir = -1; break; default: ret = -EINVAL; goto out; } for (i = start; i != end; i += dir) if (dev->db_tab->page[i].db_rec && !bitmap_full(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE)) { page = dev->db_tab->page + i; goto found; } for (i = start; i != end; i += dir) if (!dev->db_tab->page[i].db_rec) { page = dev->db_tab->page + i; goto alloc; } if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) { ret = -ENOMEM; goto out; } if (group == 0) ++dev->db_tab->max_group1; else --dev->db_tab->min_group2; page = dev->db_tab->page + end; alloc: page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, &page->mapping, GFP_KERNEL); if (!page->db_rec) { ret = -ENOMEM; goto out; } ret = mthca_MAP_ICM_page(dev, page->mapping, mthca_uarc_virt(dev, &dev->driver_uar, i)); if (ret) { dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, page->db_rec, page->mapping); goto out; } bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE); found: j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE); set_bit(j, page->used); if (group == 1) j = MTHCA_DB_REC_PER_PAGE - 1 - j; ret = i * MTHCA_DB_REC_PER_PAGE + j; page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5)); *db = (__be32 *) &page->db_rec[j]; out: mutex_unlock(&dev->db_tab->mutex); return ret; } void mthca_free_db(struct mthca_dev *dev, int type, int db_index) { int i, j; struct mthca_db_page *page; i = db_index / MTHCA_DB_REC_PER_PAGE; j = db_index % MTHCA_DB_REC_PER_PAGE; page = dev->db_tab->page + i; mutex_lock(&dev->db_tab->mutex); page->db_rec[j] = 0; if (i >= dev->db_tab->min_group2) j = MTHCA_DB_REC_PER_PAGE - 1 - j; clear_bit(j, page->used); if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) && i >= dev->db_tab->max_group1 - 1) { mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1); dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, page->db_rec, page->mapping); page->db_rec = NULL; if (i == dev->db_tab->max_group1) { --dev->db_tab->max_group1; /* XXX may be able 
to unmap more pages now */ } if (i == dev->db_tab->min_group2) ++dev->db_tab->min_group2; } mutex_unlock(&dev->db_tab->mutex); } int mthca_init_db_tab(struct mthca_dev *dev) { int i; if (!mthca_is_memfree(dev)) return 0; dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL); if (!dev->db_tab) return -ENOMEM; mutex_init(&dev->db_tab->mutex); dev->db_tab->npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; dev->db_tab->max_group1 = 0; dev->db_tab->min_group2 = dev->db_tab->npages - 1; dev->db_tab->page = kmalloc_array(dev->db_tab->npages, sizeof(*dev->db_tab->page), GFP_KERNEL); if (!dev->db_tab->page) { kfree(dev->db_tab); return -ENOMEM; } for (i = 0; i < dev->db_tab->npages; ++i) dev->db_tab->page[i].db_rec = NULL; return 0; } void mthca_cleanup_db_tab(struct mthca_dev *dev) { int i; if (!mthca_is_memfree(dev)) return; /* * Because we don't always free our UARC pages when they * become empty to make mthca_free_db() simpler we need to * make a sweep through the doorbell pages and free any * leftover pages now. */ for (i = 0; i < dev->db_tab->npages; ++i) { if (!dev->db_tab->page[i].db_rec) continue; if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE)) mthca_warn(dev, "Kernel UARC page %d not empty\n", i); mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1); dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, dev->db_tab->page[i].db_rec, dev->db_tab->page[i].mapping); } kfree(dev->db_tab->page); kfree(dev->db_tab); }
linux-master
drivers/infiniband/hw/mthca/mthca_memfree.c
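mthca_alloc_icm() in mthca_memfree.c grabs ICM memory "in as big chunks as we can": it starts at the largest allocation order and halves the order whenever an allocation fails, until the requested page count is covered. A minimal userspace sketch of that fallback loop follows, with malloc() standing in for alloc_pages() and no scatterlist or DMA mapping; alloc_in_chunks() is an illustrative name.

/* Sketch only (not kernel code): largest-order-first allocation with fallback. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096
#define MAX_ORDER 6			/* up to 64 pages (256 KB) per chunk */

static int alloc_in_chunks(int npages)
{
	int cur_order = MAX_ORDER;
	int chunks = 0;

	while (npages > 0) {
		while ((1 << cur_order) > npages)
			--cur_order;	/* never allocate more than needed */

		void *buf = malloc((size_t)PAGE_SIZE << cur_order);
		if (!buf) {
			if (--cur_order < 0)
				return -1;	/* truly out of memory */
			continue;		/* retry with a smaller order */
		}
		free(buf);			/* a real ICM would keep the chunk */
		++chunks;
		npages -= 1 << cur_order;
	}
	return chunks;
}

int main(void)
{
	printf("%d chunks for 200 pages\n", alloc_in_chunks(200));
	return 0;
}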
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_config_reg.h" enum { MTHCA_NUM_ASYNC_EQE = 0x80, MTHCA_NUM_CMD_EQE = 0x80, MTHCA_NUM_SPARE_EQE = 0x80, MTHCA_EQ_ENTRY_SIZE = 0x20 }; /* * Must be packed because start is 64 bits but only aligned to 32 bits. 
*/ struct mthca_eq_context { __be32 flags; __be64 start; __be32 logsize_usrpage; __be32 tavor_pd; /* reserved for Arbel */ u8 reserved1[3]; u8 intr; __be32 arbel_pd; /* lost_count for Tavor */ __be32 lkey; u32 reserved2[2]; __be32 consumer_index; __be32 producer_index; u32 reserved3[4]; } __packed; #define MTHCA_EQ_STATUS_OK ( 0 << 28) #define MTHCA_EQ_STATUS_OVERFLOW ( 9 << 28) #define MTHCA_EQ_STATUS_WRITE_FAIL (10 << 28) #define MTHCA_EQ_OWNER_SW ( 0 << 24) #define MTHCA_EQ_OWNER_HW ( 1 << 24) #define MTHCA_EQ_FLAG_TR ( 1 << 18) #define MTHCA_EQ_FLAG_OI ( 1 << 17) #define MTHCA_EQ_STATE_ARMED ( 1 << 8) #define MTHCA_EQ_STATE_FIRED ( 2 << 8) #define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 << 8) #define MTHCA_EQ_STATE_ARBEL ( 8 << 8) enum { MTHCA_EVENT_TYPE_COMP = 0x00, MTHCA_EVENT_TYPE_PATH_MIG = 0x01, MTHCA_EVENT_TYPE_COMM_EST = 0x02, MTHCA_EVENT_TYPE_SQ_DRAINED = 0x03, MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13, MTHCA_EVENT_TYPE_SRQ_LIMIT = 0x14, MTHCA_EVENT_TYPE_CQ_ERROR = 0x04, MTHCA_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06, MTHCA_EVENT_TYPE_PATH_MIG_FAILED = 0x07, MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08, MTHCA_EVENT_TYPE_PORT_CHANGE = 0x09, MTHCA_EVENT_TYPE_EQ_OVERFLOW = 0x0f, MTHCA_EVENT_TYPE_ECC_DETECT = 0x0e, MTHCA_EVENT_TYPE_CMD = 0x0a }; #define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG) | \ (1ULL << MTHCA_EVENT_TYPE_COMM_EST) | \ (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED) | \ (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED) | \ (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \ (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT)) #define MTHCA_SRQ_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT)) #define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD) #define MTHCA_EQ_DB_INC_CI (1 << 24) #define MTHCA_EQ_DB_REQ_NOT (2 << 24) #define MTHCA_EQ_DB_DISARM_CQ (3 << 24) #define MTHCA_EQ_DB_SET_CI (4 << 24) #define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24) struct mthca_eqe { u8 reserved1; u8 type; u8 reserved2; u8 subtype; union { u32 raw[6]; struct { __be32 cqn; } __packed comp; struct { u16 reserved1; __be16 token; u32 reserved2; u8 reserved3[3]; u8 status; __be64 out_param; } __packed cmd; struct { __be32 qpn; } __packed qp; struct { __be32 srqn; } __packed srq; struct { __be32 cqn; u32 reserved1; u8 reserved2[3]; u8 syndrome; } __packed cq_err; struct { u32 reserved1[2]; __be32 port; } __packed port_change; } event; u8 reserved3[3]; u8 owner; } __packed; #define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7) #define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7) static inline u64 async_mask(struct mthca_dev *dev) { return dev->mthca_flags & MTHCA_FLAG_SRQ ? MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK : MTHCA_ASYNC_EVENT_MASK; } static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) { /* * This barrier makes sure that all updates to ownership bits * done by set_eqe_hw() hit memory before the consumer index * is updated. 
set_eq_ci() allows the HCA to possibly write * more EQ entries, and we want to avoid the exceedingly * unlikely possibility of the HCA writing an entry and then * having set_eqe_hw() overwrite the owner field. */ wmb(); mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1), dev->kar + MTHCA_EQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) { /* See comment in tavor_set_eq_ci() above. */ wmb(); __raw_writel((__force u32) cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); /* We still want ordering, just not swabbing, so add a barrier */ mb(); } static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) { if (mthca_is_memfree(dev)) arbel_set_eq_ci(dev, eq, ci); else tavor_set_eq_ci(dev, eq, ci); } static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) { mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0, dev->kar + MTHCA_EQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask) { writel(eqn_mask, dev->eq_regs.arbel.eq_arm); } static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) { if (!mthca_is_memfree(dev)) { mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn, dev->kar + MTHCA_EQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } } static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry) { unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE; return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; } static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq) { struct mthca_eqe *eqe; eqe = get_eqe(eq, eq->cons_index); return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe; } static inline void set_eqe_hw(struct mthca_eqe *eqe) { eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW; } static void port_change(struct mthca_dev *dev, int port, int active) { struct ib_event record; mthca_dbg(dev, "Port change to %s for port %d\n", active ? "active" : "down", port); record.device = &dev->ib_dev; record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; record.element.port_num = port; ib_dispatch_event(&record); } static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq) { struct mthca_eqe *eqe; int disarm_cqn; int eqes_found = 0; int set_ci = 0; while ((eqe = next_eqe_sw(eq))) { /* * Make sure we read EQ entry contents after we've * checked the ownership bit. 
*/ rmb(); switch (eqe->type) { case MTHCA_EVENT_TYPE_COMP: disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; disarm_cq(dev, eq->eqn, disarm_cqn); mthca_cq_completion(dev, disarm_cqn); break; case MTHCA_EVENT_TYPE_PATH_MIG: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_PATH_MIG); break; case MTHCA_EVENT_TYPE_COMM_EST: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_COMM_EST); break; case MTHCA_EVENT_TYPE_SQ_DRAINED: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_SQ_DRAINED); break; case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_QP_LAST_WQE_REACHED); break; case MTHCA_EVENT_TYPE_SRQ_LIMIT: mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, IB_EVENT_SRQ_LIMIT_REACHED); break; case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_QP_FATAL); break; case MTHCA_EVENT_TYPE_PATH_MIG_FAILED: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_PATH_MIG_ERR); break; case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_QP_REQ_ERR); break; case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, IB_EVENT_QP_ACCESS_ERR); break; case MTHCA_EVENT_TYPE_CMD: mthca_cmd_event(dev, be16_to_cpu(eqe->event.cmd.token), eqe->event.cmd.status, be64_to_cpu(eqe->event.cmd.out_param)); break; case MTHCA_EVENT_TYPE_PORT_CHANGE: port_change(dev, (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3, eqe->subtype == 0x4); break; case MTHCA_EVENT_TYPE_CQ_ERROR: mthca_warn(dev, "CQ %s on CQN %06x\n", eqe->event.cq_err.syndrome == 1 ? "overrun" : "access violation", be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), IB_EVENT_CQ_ERR); break; case MTHCA_EVENT_TYPE_EQ_OVERFLOW: mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); break; case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR: case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR: case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR: case MTHCA_EVENT_TYPE_ECC_DETECT: default: mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n", eqe->type, eqe->subtype, eq->eqn); break; } set_eqe_hw(eqe); ++eq->cons_index; eqes_found = 1; ++set_ci; /* * The HCA will think the queue has overflowed if we * don't tell it we've been processing events. We * create our EQs with MTHCA_NUM_SPARE_EQE extra * entries, so we must update our consumer index at * least that often. */ if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) { /* * Conditional on hca_type is OK here because * this is a rare case, not the fast path. */ set_eq_ci(dev, eq, eq->cons_index); set_ci = 0; } } /* * Rely on caller to set consumer index so that we don't have * to test hca_type in our interrupt handling fast path. 
*/ return eqes_found; } static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr) { struct mthca_dev *dev = dev_ptr; u32 ecr; int i; if (dev->eq_table.clr_mask) writel(dev->eq_table.clr_mask, dev->eq_table.clr_int); ecr = readl(dev->eq_regs.tavor.ecr_base + 4); if (!ecr) return IRQ_NONE; writel(ecr, dev->eq_regs.tavor.ecr_base + MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4); for (i = 0; i < MTHCA_NUM_EQ; ++i) if (ecr & dev->eq_table.eq[i].eqn_mask) { if (mthca_eq_int(dev, &dev->eq_table.eq[i])) tavor_set_eq_ci(dev, &dev->eq_table.eq[i], dev->eq_table.eq[i].cons_index); tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); } return IRQ_HANDLED; } static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr) { struct mthca_eq *eq = eq_ptr; struct mthca_dev *dev = eq->dev; mthca_eq_int(dev, eq); tavor_set_eq_ci(dev, eq, eq->cons_index); tavor_eq_req_not(dev, eq->eqn); /* MSI-X vectors always belong to us */ return IRQ_HANDLED; } static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr) { struct mthca_dev *dev = dev_ptr; int work = 0; int i; if (dev->eq_table.clr_mask) writel(dev->eq_table.clr_mask, dev->eq_table.clr_int); for (i = 0; i < MTHCA_NUM_EQ; ++i) if (mthca_eq_int(dev, &dev->eq_table.eq[i])) { work = 1; arbel_set_eq_ci(dev, &dev->eq_table.eq[i], dev->eq_table.eq[i].cons_index); } arbel_eq_req_not(dev, dev->eq_table.arm_mask); return IRQ_RETVAL(work); } static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr) { struct mthca_eq *eq = eq_ptr; struct mthca_dev *dev = eq->dev; mthca_eq_int(dev, eq); arbel_set_eq_ci(dev, eq, eq->cons_index); arbel_eq_req_not(dev, eq->eqn_mask); /* MSI-X vectors always belong to us */ return IRQ_HANDLED; } static int mthca_create_eq(struct mthca_dev *dev, int nent, u8 intr, struct mthca_eq *eq) { int npages; u64 *dma_list = NULL; dma_addr_t t; struct mthca_mailbox *mailbox; struct mthca_eq_context *eq_context; int err = -ENOMEM; int i; eq->dev = dev; eq->nent = roundup_pow_of_two(max(nent, 2)); npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list), GFP_KERNEL); if (!eq->page_list) goto err_out; for (i = 0; i < npages; ++i) eq->page_list[i].buf = NULL; dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL); if (!dma_list) goto err_out_free; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) goto err_out_free; eq_context = mailbox->buf; for (i = 0; i < npages; ++i) { eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, &t, GFP_KERNEL); if (!eq->page_list[i].buf) goto err_out_free_pages; dma_list[i] = t; dma_unmap_addr_set(&eq->page_list[i], mapping, t); clear_page(eq->page_list[i].buf); } for (i = 0; i < eq->nent; ++i) set_eqe_hw(get_eqe(eq, i)); eq->eqn = mthca_alloc(&dev->eq_table.alloc); if (eq->eqn == -1) goto err_out_free_pages; err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, dma_list, PAGE_SHIFT, npages, 0, npages * PAGE_SIZE, MTHCA_MPT_FLAG_LOCAL_WRITE | MTHCA_MPT_FLAG_LOCAL_READ, &eq->mr); if (err) goto err_out_free_eq; memset(eq_context, 0, sizeof *eq_context); eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK | MTHCA_EQ_OWNER_HW | MTHCA_EQ_STATE_ARMED | MTHCA_EQ_FLAG_TR); if (mthca_is_memfree(dev)) eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL); eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24); if (mthca_is_memfree(dev)) { eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num); } else { eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index); 
eq_context->tavor_pd = cpu_to_be32(dev->driver_pd.pd_num); } eq_context->intr = intr; eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey); err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn); if (err) { mthca_warn(dev, "SW2HW_EQ returned %d\n", err); goto err_out_free_mr; } kfree(dma_list); mthca_free_mailbox(dev, mailbox); eq->eqn_mask = swab32(1 << eq->eqn); eq->cons_index = 0; dev->eq_table.arm_mask |= eq->eqn_mask; mthca_dbg(dev, "Allocated EQ %d with %d entries\n", eq->eqn, eq->nent); return err; err_out_free_mr: mthca_free_mr(dev, &eq->mr); err_out_free_eq: mthca_free(&dev->eq_table.alloc, eq->eqn); err_out_free_pages: for (i = 0; i < npages; ++i) if (eq->page_list[i].buf) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, eq->page_list[i].buf, dma_unmap_addr(&eq->page_list[i], mapping)); mthca_free_mailbox(dev, mailbox); err_out_free: kfree(eq->page_list); kfree(dma_list); err_out: return err; } static void mthca_free_eq(struct mthca_dev *dev, struct mthca_eq *eq) { struct mthca_mailbox *mailbox; int err; int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / PAGE_SIZE; int i; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return; err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn); if (err) mthca_warn(dev, "HW2SW_EQ returned %d\n", err); dev->eq_table.arm_mask &= ~eq->eqn_mask; if (0) { mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); printk(" %08x", be32_to_cpup(mailbox->buf + i * 4)); if ((i + 1) % 4 == 0) printk("\n"); } } mthca_free_mr(dev, &eq->mr); for (i = 0; i < npages; ++i) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, eq->page_list[i].buf, dma_unmap_addr(&eq->page_list[i], mapping)); kfree(eq->page_list); mthca_free_mailbox(dev, mailbox); } static void mthca_free_irqs(struct mthca_dev *dev) { int i; if (dev->eq_table.have_irq) free_irq(dev->pdev->irq, dev); for (i = 0; i < MTHCA_NUM_EQ; ++i) if (dev->eq_table.eq[i].have_irq) { free_irq(dev->eq_table.eq[i].msi_x_vector, dev->eq_table.eq + i); dev->eq_table.eq[i].have_irq = 0; } } static int mthca_map_reg(struct mthca_dev *dev, unsigned long offset, unsigned long size, void __iomem **map) { phys_addr_t base = pci_resource_start(dev->pdev, 0); *map = ioremap(base + offset, size); if (!*map) return -ENOMEM; return 0; } static int mthca_map_eq_regs(struct mthca_dev *dev) { if (mthca_is_memfree(dev)) { /* * We assume that the EQ arm and EQ set CI registers * fall within the first BAR. We can't trust the * values firmware gives us, since those addresses are * valid on the HCA's side of the PCI bus but not * necessarily the host side. */ if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, &dev->clr_base)) { mthca_err(dev, "Couldn't map interrupt clear register, " "aborting.\n"); return -ENOMEM; } /* * Add 4 because we limit ourselves to EQs 0 ... 31, * so we only need the low word of the register. 
*/ if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) & dev->fw.arbel.eq_arm_base) + 4, 4, &dev->eq_regs.arbel.eq_arm)) { mthca_err(dev, "Couldn't map EQ arm register, aborting.\n"); iounmap(dev->clr_base); return -ENOMEM; } if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & dev->fw.arbel.eq_set_ci_base, MTHCA_EQ_SET_CI_SIZE, &dev->eq_regs.arbel.eq_set_ci_base)) { mthca_err(dev, "Couldn't map EQ CI register, aborting.\n"); iounmap(dev->eq_regs.arbel.eq_arm); iounmap(dev->clr_base); return -ENOMEM; } } else { if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, &dev->clr_base)) { mthca_err(dev, "Couldn't map interrupt clear register, " "aborting.\n"); return -ENOMEM; } if (mthca_map_reg(dev, MTHCA_ECR_BASE, MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE, &dev->eq_regs.tavor.ecr_base)) { mthca_err(dev, "Couldn't map ecr register, " "aborting.\n"); iounmap(dev->clr_base); return -ENOMEM; } } return 0; } static void mthca_unmap_eq_regs(struct mthca_dev *dev) { if (mthca_is_memfree(dev)) { iounmap(dev->eq_regs.arbel.eq_set_ci_base); iounmap(dev->eq_regs.arbel.eq_arm); iounmap(dev->clr_base); } else { iounmap(dev->eq_regs.tavor.ecr_base); iounmap(dev->clr_base); } } int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) { int ret; /* * We assume that mapping one page is enough for the whole EQ * context table. This is fine with all current HCAs, because * we only use 32 EQs and each EQ uses 32 bytes of context * memory, or 1 KB total. */ dev->eq_table.icm_virt = icm_virt; dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER); if (!dev->eq_table.icm_page) return -ENOMEM; dev->eq_table.icm_dma = dma_map_page(&dev->pdev->dev, dev->eq_table.icm_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(&dev->pdev->dev, dev->eq_table.icm_dma)) { __free_page(dev->eq_table.icm_page); return -ENOMEM; } ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt); if (ret) { dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(dev->eq_table.icm_page); } return ret; } void mthca_unmap_eq_icm(struct mthca_dev *dev) { mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1); dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(dev->eq_table.icm_page); } int mthca_init_eq_table(struct mthca_dev *dev) { int err; u8 intr; int i; err = mthca_alloc_init(&dev->eq_table.alloc, dev->limits.num_eqs, dev->limits.num_eqs - 1, dev->limits.reserved_eqs); if (err) return err; err = mthca_map_eq_regs(dev); if (err) goto err_out_free; if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { dev->eq_table.clr_mask = 0; } else { dev->eq_table.clr_mask = swab32(1 << (dev->eq_table.inta_pin & 31)); dev->eq_table.clr_int = dev->clr_base + (dev->eq_table.inta_pin < 32 ? 4 : 0); } dev->eq_table.arm_mask = 0; intr = dev->eq_table.inta_pin; err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE, (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr, &dev->eq_table.eq[MTHCA_EQ_COMP]); if (err) goto err_out_unmap; err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE, (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr, &dev->eq_table.eq[MTHCA_EQ_ASYNC]); if (err) goto err_out_comp; err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE, (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 
130 : intr, &dev->eq_table.eq[MTHCA_EQ_CMD]); if (err) goto err_out_async; if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { static const char *eq_name[] = { [MTHCA_EQ_COMP] = DRV_NAME "-comp", [MTHCA_EQ_ASYNC] = DRV_NAME "-async", [MTHCA_EQ_CMD] = DRV_NAME "-cmd" }; for (i = 0; i < MTHCA_NUM_EQ; ++i) { snprintf(dev->eq_table.eq[i].irq_name, IB_DEVICE_NAME_MAX, "%s@pci:%s", eq_name[i], pci_name(dev->pdev)); err = request_irq(dev->eq_table.eq[i].msi_x_vector, mthca_is_memfree(dev) ? mthca_arbel_msi_x_interrupt : mthca_tavor_msi_x_interrupt, 0, dev->eq_table.eq[i].irq_name, dev->eq_table.eq + i); if (err) goto err_out_cmd; dev->eq_table.eq[i].have_irq = 1; } } else { snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX, DRV_NAME "@pci:%s", pci_name(dev->pdev)); err = request_irq(dev->pdev->irq, mthca_is_memfree(dev) ? mthca_arbel_interrupt : mthca_tavor_interrupt, IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev); if (err) goto err_out_cmd; dev->eq_table.have_irq = 1; } err = mthca_MAP_EQ(dev, async_mask(dev), 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); if (err) mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err); err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn); if (err) mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n", dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err); for (i = 0; i < MTHCA_NUM_EQ; ++i) if (mthca_is_memfree(dev)) arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask); else tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); return 0; err_out_cmd: mthca_free_irqs(dev); mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]); err_out_async: mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]); err_out_comp: mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]); err_out_unmap: mthca_unmap_eq_regs(dev); err_out_free: mthca_alloc_cleanup(&dev->eq_table.alloc); return err; } void mthca_cleanup_eq_table(struct mthca_dev *dev) { int i; mthca_free_irqs(dev); mthca_MAP_EQ(dev, async_mask(dev), 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn); for (i = 0; i < MTHCA_NUM_EQ; ++i) mthca_free_eq(dev, &dev->eq_table.eq[i]); mthca_unmap_eq_regs(dev); mthca_alloc_cleanup(&dev->eq_table.alloc); }
linux-master
drivers/infiniband/hw/mthca/mthca_eq.c
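mthca_eq_int() in mthca_eq.c drains the event queue by checking an ownership bit in each entry: software consumes entries it owns, hands each one back to hardware, and updates the consumer index at least every MTHCA_NUM_SPARE_EQE entries so the HCA never sees the queue as overflowed. A minimal userspace sketch of that polling discipline follows, with printf() standing in for the per-event dispatch; all names and constants are illustrative.

/* Sketch only (not kernel code): ownership-bit ring consumer. */
#include <stdio.h>

#define NENT	 8			/* must be a power of two */
#define OWNER_HW 0x80
#define SPARE	 2			/* update CI at least this often */

struct eqe {
	unsigned char owner;
	unsigned char type;
};

static unsigned int cons_index;

static void set_ci(unsigned int ci)
{
	printf("  consumer index -> %u\n", ci & (NENT - 1));
}

static int poll_eq(struct eqe *eq)
{
	int found = 0, since_ci = 0;
	struct eqe *e;

	while (!((e = &eq[cons_index & (NENT - 1)])->owner & OWNER_HW)) {
		printf("event type 0x%02x at slot %u\n",
		       (unsigned int)e->type, cons_index & (NENT - 1));
		e->owner = OWNER_HW;		/* hand the entry back to HW */
		++cons_index;
		found = 1;
		if (++since_ci >= SPARE) {	/* avoid apparent overflow */
			set_ci(cons_index);
			since_ci = 0;
		}
	}
	return found;
}

int main(void)
{
	struct eqe eq[NENT];
	int i;

	for (i = 0; i < NENT; ++i)
		eq[i].owner = OWNER_HW;
	for (i = 0; i < 3; ++i) {		/* "hardware" posts 3 events */
		eq[i].owner = 0;
		eq[i].type = 0x0a + i;
	}
	if (poll_eq(eq))
		set_ci(cons_index);		/* caller does the final CI update */
	return 0;
}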
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <linux/slab.h> #include "mthca_profile.h" enum { MTHCA_RES_QP, MTHCA_RES_EEC, MTHCA_RES_SRQ, MTHCA_RES_CQ, MTHCA_RES_EQP, MTHCA_RES_EEEC, MTHCA_RES_EQ, MTHCA_RES_RDB, MTHCA_RES_MCG, MTHCA_RES_MPT, MTHCA_RES_MTT, MTHCA_RES_UAR, MTHCA_RES_UDAV, MTHCA_RES_UARC, MTHCA_RES_NUM }; enum { MTHCA_NUM_EQS = 32, MTHCA_NUM_PDS = 1 << 15 }; s64 mthca_make_profile(struct mthca_dev *dev, struct mthca_profile *request, struct mthca_dev_lim *dev_lim, struct mthca_init_hca_param *init_hca) { struct mthca_resource { u64 size; u64 start; int type; int num; int log_num; }; u64 mem_base, mem_avail; s64 total_size = 0; struct mthca_resource *profile; int i, j; profile = kcalloc(MTHCA_RES_NUM, sizeof(*profile), GFP_KERNEL); if (!profile) return -ENOMEM; profile[MTHCA_RES_QP].size = dev_lim->qpc_entry_sz; profile[MTHCA_RES_EEC].size = dev_lim->eec_entry_sz; profile[MTHCA_RES_SRQ].size = dev_lim->srq_entry_sz; profile[MTHCA_RES_CQ].size = dev_lim->cqc_entry_sz; profile[MTHCA_RES_EQP].size = dev_lim->eqpc_entry_sz; profile[MTHCA_RES_EEEC].size = dev_lim->eeec_entry_sz; profile[MTHCA_RES_EQ].size = dev_lim->eqc_entry_sz; profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE; profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE; profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz; profile[MTHCA_RES_MTT].size = dev->limits.mtt_seg_size; profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz; profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE; profile[MTHCA_RES_UARC].size = request->uarc_size; profile[MTHCA_RES_QP].num = request->num_qp; profile[MTHCA_RES_SRQ].num = request->num_srq; profile[MTHCA_RES_EQP].num = request->num_qp; profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp; profile[MTHCA_RES_CQ].num = request->num_cq; profile[MTHCA_RES_EQ].num = MTHCA_NUM_EQS; profile[MTHCA_RES_MCG].num = request->num_mcg; profile[MTHCA_RES_MPT].num = request->num_mpt; profile[MTHCA_RES_MTT].num = request->num_mtt; profile[MTHCA_RES_UAR].num = request->num_uar; profile[MTHCA_RES_UARC].num = request->num_uar; profile[MTHCA_RES_UDAV].num = request->num_udav; for (i = 0; i < MTHCA_RES_NUM; ++i) { 
profile[i].type = i; profile[i].log_num = max(ffs(profile[i].num) - 1, 0); profile[i].size *= profile[i].num; if (mthca_is_memfree(dev)) profile[i].size = max(profile[i].size, (u64) PAGE_SIZE); } if (mthca_is_memfree(dev)) { mem_base = 0; mem_avail = dev_lim->hca.arbel.max_icm_sz; } else { mem_base = dev->ddr_start; mem_avail = dev->fw.tavor.fw_start - dev->ddr_start; } /* * Sort the resources in decreasing order of size. Since they * all have sizes that are powers of 2, we'll be able to keep * resources aligned to their size and pack them without gaps * using the sorted order. */ for (i = MTHCA_RES_NUM; i > 0; --i) for (j = 1; j < i; ++j) { if (profile[j].size > profile[j - 1].size) swap(profile[j], profile[j - 1]); } for (i = 0; i < MTHCA_RES_NUM; ++i) { if (profile[i].size) { profile[i].start = mem_base + total_size; total_size += profile[i].size; } if (total_size > mem_avail) { mthca_err(dev, "Profile requires 0x%llx bytes; " "won't fit in 0x%llx bytes of context memory.\n", (unsigned long long) total_size, (unsigned long long) mem_avail); kfree(profile); return -ENOMEM; } if (profile[i].size) mthca_dbg(dev, "profile[%2d]--%2d/%2d @ 0x%16llx " "(size 0x%8llx)\n", i, profile[i].type, profile[i].log_num, (unsigned long long) profile[i].start, (unsigned long long) profile[i].size); } if (mthca_is_memfree(dev)) mthca_dbg(dev, "HCA context memory: reserving %d KB\n", (int) (total_size >> 10)); else mthca_dbg(dev, "HCA memory: allocated %d KB/%d KB (%d KB free)\n", (int) (total_size >> 10), (int) (mem_avail >> 10), (int) ((mem_avail - total_size) >> 10)); for (i = 0; i < MTHCA_RES_NUM; ++i) { switch (profile[i].type) { case MTHCA_RES_QP: dev->limits.num_qps = profile[i].num; init_hca->qpc_base = profile[i].start; init_hca->log_num_qps = profile[i].log_num; break; case MTHCA_RES_EEC: dev->limits.num_eecs = profile[i].num; init_hca->eec_base = profile[i].start; init_hca->log_num_eecs = profile[i].log_num; break; case MTHCA_RES_SRQ: dev->limits.num_srqs = profile[i].num; init_hca->srqc_base = profile[i].start; init_hca->log_num_srqs = profile[i].log_num; break; case MTHCA_RES_CQ: dev->limits.num_cqs = profile[i].num; init_hca->cqc_base = profile[i].start; init_hca->log_num_cqs = profile[i].log_num; break; case MTHCA_RES_EQP: init_hca->eqpc_base = profile[i].start; break; case MTHCA_RES_EEEC: init_hca->eeec_base = profile[i].start; break; case MTHCA_RES_EQ: dev->limits.num_eqs = profile[i].num; init_hca->eqc_base = profile[i].start; init_hca->log_num_eqs = profile[i].log_num; break; case MTHCA_RES_RDB: for (dev->qp_table.rdb_shift = 0; request->num_qp << dev->qp_table.rdb_shift < profile[i].num; ++dev->qp_table.rdb_shift) ; /* nothing */ dev->qp_table.rdb_base = (u32) profile[i].start; init_hca->rdb_base = profile[i].start; break; case MTHCA_RES_MCG: dev->limits.num_mgms = profile[i].num >> 1; dev->limits.num_amgms = profile[i].num >> 1; init_hca->mc_base = profile[i].start; init_hca->log_mc_entry_sz = ffs(MTHCA_MGM_ENTRY_SIZE) - 1; init_hca->log_mc_table_sz = profile[i].log_num; init_hca->mc_hash_sz = 1 << (profile[i].log_num - 1); break; case MTHCA_RES_MPT: dev->limits.num_mpts = profile[i].num; dev->mr_table.mpt_base = profile[i].start; init_hca->mpt_base = profile[i].start; init_hca->log_mpt_sz = profile[i].log_num; break; case MTHCA_RES_MTT: dev->limits.num_mtt_segs = profile[i].num; dev->mr_table.mtt_base = profile[i].start; init_hca->mtt_base = profile[i].start; init_hca->mtt_seg_sz = ffs(dev->limits.mtt_seg_size) - 7; break; case MTHCA_RES_UAR: dev->limits.num_uars = profile[i].num; 
init_hca->uar_scratch_base = profile[i].start; break; case MTHCA_RES_UDAV: dev->av_table.ddr_av_base = profile[i].start; dev->av_table.num_ddr_avs = profile[i].num; break; case MTHCA_RES_UARC: dev->uar_table.uarc_size = request->uarc_size; dev->uar_table.uarc_base = profile[i].start; init_hca->uarc_base = profile[i].start; init_hca->log_uarc_sz = ffs(request->uarc_size) - 13; init_hca->log_uar_sz = ffs(request->num_uar) - 1; break; default: break; } } /* * PDs don't take any HCA memory, but we assign them as part * of the HCA profile anyway. */ dev->limits.num_pds = MTHCA_NUM_PDS; if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT && init_hca->log_mpt_sz > 23) { mthca_warn(dev, "MPT table too large (requested size 2^%d >= 2^24)\n", init_hca->log_mpt_sz); mthca_warn(dev, "Disabling memory key throughput optimization.\n"); dev->mthca_flags &= ~MTHCA_FLAG_SINAI_OPT; } /* * For Tavor, FMRs use ioremapped PCI memory. For 32 bit * systems it may use too much vmalloc space to map all MTT * memory, so we reserve some MTTs for FMR access, taking them * out of the MR pool. They don't use additional memory, but * we assign them as part of the HCA profile anyway. */ if (mthca_is_memfree(dev) || BITS_PER_LONG == 64) dev->limits.fmr_reserved_mtts = 0; else dev->limits.fmr_reserved_mtts = request->fmr_reserved_mtts; kfree(profile); return total_size; }
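The layout pass above depends on a simple invariant: when every size is a power of two and the resources are placed in decreasing order of size from an aligned base, each start offset comes out aligned to its own resource's size with no padding needed. A minimal standalone sketch of that idea, using made-up resource names and sizes rather than the driver's structures:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct res { const char *name; uint64_t size; uint64_t start; };

int main(void)
{
	/* Power-of-two sizes, deliberately unsorted (illustrative values). */
	struct res r[] = {
		{ "cqc",  4096, 0 },
		{ "qpc", 65536, 0 },
		{ "eqc",  1024, 0 },
		{ "mtt", 32768, 0 },
	};
	int n = sizeof(r) / sizeof(r[0]);
	uint64_t base = 0;	/* must itself be aligned to the largest size */
	uint64_t total = 0;
	int i, j;

	/* Bubble sort in decreasing order of size, as the profile code does. */
	for (i = n; i > 0; --i)
		for (j = 1; j < i; ++j)
			if (r[j].size > r[j - 1].size) {
				struct res tmp = r[j];
				r[j] = r[j - 1];
				r[j - 1] = tmp;
			}

	/* Pack back to back; with descending power-of-two sizes every
	 * start stays naturally aligned to its own size. */
	for (i = 0; i < n; ++i) {
		r[i].start = base + total;
		total += r[i].size;
		assert(r[i].start % r[i].size == 0);
		printf("%-3s @ 0x%06llx (size 0x%05llx)\n", r[i].name,
		       (unsigned long long)r[i].start,
		       (unsigned long long)r[i].size);
	}
	return 0;
}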
linux-master
drivers/infiniband/hw/mthca/mthca_profile.c
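Two idioms from the profile code above are easy to miss when read inline: ffs(x) - 1 acts as log2() for power-of-two counts, and the RDB loop finds the smallest shift s with (num_qp << s) >= num_rdb. A small self-checking sketch, with hypothetical values rather than the driver's:

#include <assert.h>
#include <strings.h>	/* ffs() */

/* log2() for power-of-two values: the only set bit is the lowest one. */
static int log2_pow2(unsigned int x)
{
	return ffs(x) - 1;	/* -1 for x == 0; the driver clamps with max() */
}

/* Smallest shift such that num_qp << shift covers num_rdb, i.e. the same
 * computation as the qp_table.rdb_shift loop above. */
static int rdb_shift_for(unsigned int num_qp, unsigned int num_rdb)
{
	int shift = 0;

	while (num_qp << shift < num_rdb)
		++shift;
	return shift;
}

int main(void)
{
	assert(log2_pow2(1) == 0);
	assert(log2_pow2(1 << 16) == 16);
	assert(rdb_shift_for(1 << 16, (1 << 16) * 4) == 2);
	assert(rdb_shift_for(1 << 16, 1 << 16) == 0);
	return 0;
}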
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/completion.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/io.h> #include <rdma/ib_mad.h> #include "mthca_dev.h" #include "mthca_config_reg.h" #include "mthca_cmd.h" #include "mthca_memfree.h" #define CMD_POLL_TOKEN 0xffff enum { HCR_IN_PARAM_OFFSET = 0x00, HCR_IN_MODIFIER_OFFSET = 0x08, HCR_OUT_PARAM_OFFSET = 0x0c, HCR_TOKEN_OFFSET = 0x14, HCR_STATUS_OFFSET = 0x18, HCR_OPMOD_SHIFT = 12, HCA_E_BIT = 22, HCR_GO_BIT = 23 }; enum { /* initialization and general commands */ CMD_SYS_EN = 0x1, CMD_SYS_DIS = 0x2, CMD_MAP_FA = 0xfff, CMD_UNMAP_FA = 0xffe, CMD_RUN_FW = 0xff6, CMD_MOD_STAT_CFG = 0x34, CMD_QUERY_DEV_LIM = 0x3, CMD_QUERY_FW = 0x4, CMD_ENABLE_LAM = 0xff8, CMD_DISABLE_LAM = 0xff7, CMD_QUERY_DDR = 0x5, CMD_QUERY_ADAPTER = 0x6, CMD_INIT_HCA = 0x7, CMD_CLOSE_HCA = 0x8, CMD_INIT_IB = 0x9, CMD_CLOSE_IB = 0xa, CMD_QUERY_HCA = 0xb, CMD_SET_IB = 0xc, CMD_ACCESS_DDR = 0x2e, CMD_MAP_ICM = 0xffa, CMD_UNMAP_ICM = 0xff9, CMD_MAP_ICM_AUX = 0xffc, CMD_UNMAP_ICM_AUX = 0xffb, CMD_SET_ICM_SIZE = 0xffd, /* TPT commands */ CMD_SW2HW_MPT = 0xd, CMD_QUERY_MPT = 0xe, CMD_HW2SW_MPT = 0xf, CMD_READ_MTT = 0x10, CMD_WRITE_MTT = 0x11, CMD_SYNC_TPT = 0x2f, /* EQ commands */ CMD_MAP_EQ = 0x12, CMD_SW2HW_EQ = 0x13, CMD_HW2SW_EQ = 0x14, CMD_QUERY_EQ = 0x15, /* CQ commands */ CMD_SW2HW_CQ = 0x16, CMD_HW2SW_CQ = 0x17, CMD_QUERY_CQ = 0x18, CMD_RESIZE_CQ = 0x2c, /* SRQ commands */ CMD_SW2HW_SRQ = 0x35, CMD_HW2SW_SRQ = 0x36, CMD_QUERY_SRQ = 0x37, CMD_ARM_SRQ = 0x40, /* QP/EE commands */ CMD_RST2INIT_QPEE = 0x19, CMD_INIT2RTR_QPEE = 0x1a, CMD_RTR2RTS_QPEE = 0x1b, CMD_RTS2RTS_QPEE = 0x1c, CMD_SQERR2RTS_QPEE = 0x1d, CMD_2ERR_QPEE = 0x1e, CMD_RTS2SQD_QPEE = 0x1f, CMD_SQD2SQD_QPEE = 0x38, CMD_SQD2RTS_QPEE = 0x20, CMD_ERR2RST_QPEE = 0x21, CMD_QUERY_QPEE = 0x22, CMD_INIT2INIT_QPEE = 0x2d, CMD_SUSPEND_QPEE = 0x32, CMD_UNSUSPEND_QPEE = 0x33, /* special QPs and management commands */ CMD_CONF_SPECIAL_QP = 0x23, CMD_MAD_IFC = 0x24, /* multicast commands */ CMD_READ_MGM = 0x25, 
CMD_WRITE_MGM = 0x26, CMD_MGID_HASH = 0x27, /* miscellaneous commands */ CMD_DIAG_RPRT = 0x30, CMD_NOP = 0x31, /* debug commands */ CMD_QUERY_DEBUG_MSG = 0x2a, CMD_SET_DEBUG_MSG = 0x2b, }; /* * According to Mellanox code, FW may be starved and never complete * commands. So we can't use strict timeouts described in PRM -- we * just arbitrarily select 60 seconds for now. */ #if 0 /* * Round up and add 1 to make sure we get the full wait time (since we * will be starting in the middle of a jiffy) */ enum { CMD_TIME_CLASS_A = (HZ + 999) / 1000 + 1, CMD_TIME_CLASS_B = (HZ + 99) / 100 + 1, CMD_TIME_CLASS_C = (HZ + 9) / 10 + 1, CMD_TIME_CLASS_D = 60 * HZ }; #else enum { CMD_TIME_CLASS_A = 60 * HZ, CMD_TIME_CLASS_B = 60 * HZ, CMD_TIME_CLASS_C = 60 * HZ, CMD_TIME_CLASS_D = 60 * HZ }; #endif enum { GO_BIT_TIMEOUT = HZ * 10 }; struct mthca_cmd_context { struct completion done; int result; int next; u64 out_param; u16 token; u8 status; }; static int fw_cmd_doorbell = 0; module_param(fw_cmd_doorbell, int, 0644); MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " "(and supported by FW)"); static inline int go_bit(struct mthca_dev *dev) { return readl(dev->hcr + HCR_STATUS_OFFSET) & swab32(1 << HCR_GO_BIT); } static void mthca_cmd_post_dbell(struct mthca_dev *dev, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, u16 token) { void __iomem *ptr = dev->cmd.dbell_map; u16 *offs = dev->cmd.dbell_offsets; __raw_writel((__force u32) cpu_to_be32(in_param >> 32), ptr + offs[0]); wmb(); __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), ptr + offs[1]); wmb(); __raw_writel((__force u32) cpu_to_be32(in_modifier), ptr + offs[2]); wmb(); __raw_writel((__force u32) cpu_to_be32(out_param >> 32), ptr + offs[3]); wmb(); __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), ptr + offs[4]); wmb(); __raw_writel((__force u32) cpu_to_be32(token << 16), ptr + offs[5]); wmb(); __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | (1 << HCA_E_BIT) | (op_modifier << HCR_OPMOD_SHIFT) | op), ptr + offs[6]); wmb(); __raw_writel((__force u32) 0, ptr + offs[7]); wmb(); } static int mthca_cmd_post_hcr(struct mthca_dev *dev, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, u16 token, int event) { if (event) { unsigned long end = jiffies + GO_BIT_TIMEOUT; while (go_bit(dev) && time_before(jiffies, end)) { set_current_state(TASK_RUNNING); schedule(); } } if (go_bit(dev)) return -EAGAIN; /* * We use writel (instead of something like memcpy_toio) * because writes of less than 32 bits to the HCR don't work * (and some architectures such as ia64 implement memcpy_toio * in terms of writeb). */ __raw_writel((__force u32) cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); __raw_writel((__force u32) cpu_to_be32(in_modifier), dev->hcr + 2 * 4); __raw_writel((__force u32) cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); __raw_writel((__force u32) cpu_to_be32(token << 16), dev->hcr + 5 * 4); /* __raw_writel may not order writes. */ wmb(); __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | (event ? 
(1 << HCA_E_BIT) : 0) | (op_modifier << HCR_OPMOD_SHIFT) | op), dev->hcr + 6 * 4); return 0; } static int mthca_cmd_post(struct mthca_dev *dev, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, u16 token, int event) { int err = 0; mutex_lock(&dev->cmd.hcr_mutex); if (event && dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS && fw_cmd_doorbell) mthca_cmd_post_dbell(dev, in_param, out_param, in_modifier, op_modifier, op, token); else err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier, op_modifier, op, token, event); mutex_unlock(&dev->cmd.hcr_mutex); return err; } static int mthca_status_to_errno(u8 status) { static const int trans_table[] = { [MTHCA_CMD_STAT_INTERNAL_ERR] = -EIO, [MTHCA_CMD_STAT_BAD_OP] = -EPERM, [MTHCA_CMD_STAT_BAD_PARAM] = -EINVAL, [MTHCA_CMD_STAT_BAD_SYS_STATE] = -ENXIO, [MTHCA_CMD_STAT_BAD_RESOURCE] = -EBADF, [MTHCA_CMD_STAT_RESOURCE_BUSY] = -EBUSY, [MTHCA_CMD_STAT_DDR_MEM_ERR] = -ENOMEM, [MTHCA_CMD_STAT_EXCEED_LIM] = -ENOMEM, [MTHCA_CMD_STAT_BAD_RES_STATE] = -EBADF, [MTHCA_CMD_STAT_BAD_INDEX] = -EBADF, [MTHCA_CMD_STAT_BAD_NVMEM] = -EFAULT, [MTHCA_CMD_STAT_BAD_QPEE_STATE] = -EINVAL, [MTHCA_CMD_STAT_BAD_SEG_PARAM] = -EFAULT, [MTHCA_CMD_STAT_REG_BOUND] = -EBUSY, [MTHCA_CMD_STAT_LAM_NOT_PRE] = -EAGAIN, [MTHCA_CMD_STAT_BAD_PKT] = -EBADMSG, [MTHCA_CMD_STAT_BAD_SIZE] = -ENOMEM, }; if (status >= ARRAY_SIZE(trans_table) || (status != MTHCA_CMD_STAT_OK && trans_table[status] == 0)) return -EINVAL; return trans_table[status]; } static int mthca_cmd_poll(struct mthca_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { int err = 0; unsigned long end; u8 status; down(&dev->cmd.poll_sem); err = mthca_cmd_post(dev, in_param, out_param ? *out_param : 0, in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); if (err) goto out; end = timeout + jiffies; while (go_bit(dev) && time_before(jiffies, end)) { set_current_state(TASK_RUNNING); schedule(); } if (go_bit(dev)) { err = -EBUSY; goto out; } if (out_is_imm && out_param) { *out_param = (u64) be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 | (u64) be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4)); } else if (out_is_imm) { err = -EINVAL; goto out; } status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; if (status) { mthca_dbg(dev, "Command %02x completed with status %02x\n", op, status); err = mthca_status_to_errno(status); } out: up(&dev->cmd.poll_sem); return err; } void mthca_cmd_event(struct mthca_dev *dev, u16 token, u8 status, u64 out_param) { struct mthca_cmd_context *context = &dev->cmd.context[token & dev->cmd.token_mask]; /* previously timed out command completing at long last */ if (token != context->token) return; context->result = 0; context->status = status; context->out_param = out_param; complete(&context->done); } static int mthca_cmd_wait(struct mthca_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { int err = 0; struct mthca_cmd_context *context; down(&dev->cmd.event_sem); spin_lock(&dev->cmd.context_lock); BUG_ON(dev->cmd.free_head < 0); context = &dev->cmd.context[dev->cmd.free_head]; context->token += dev->cmd.token_mask + 1; dev->cmd.free_head = context->next; spin_unlock(&dev->cmd.context_lock); init_completion(&context->done); err = mthca_cmd_post(dev, in_param, out_param ? 
*out_param : 0, in_modifier, op_modifier, op, context->token, 1); if (err) goto out; if (!wait_for_completion_timeout(&context->done, timeout)) { err = -EBUSY; goto out; } err = context->result; if (err) goto out; if (context->status) { mthca_dbg(dev, "Command %02x completed with status %02x\n", op, context->status); err = mthca_status_to_errno(context->status); } if (out_is_imm && out_param) { *out_param = context->out_param; } else if (out_is_imm) { err = -EINVAL; goto out; } out: spin_lock(&dev->cmd.context_lock); context->next = dev->cmd.free_head; dev->cmd.free_head = context - dev->cmd.context; spin_unlock(&dev->cmd.context_lock); up(&dev->cmd.event_sem); return err; } /* Invoke a command with an output mailbox */ static int mthca_cmd_box(struct mthca_dev *dev, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS) return mthca_cmd_wait(dev, in_param, &out_param, 0, in_modifier, op_modifier, op, timeout); else return mthca_cmd_poll(dev, in_param, &out_param, 0, in_modifier, op_modifier, op, timeout); } /* Invoke a command with no output parameter */ static int mthca_cmd(struct mthca_dev *dev, u64 in_param, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { return mthca_cmd_box(dev, in_param, 0, in_modifier, op_modifier, op, timeout); } /* * Invoke a command with an immediate output parameter (and copy the * output into the caller's out_param pointer after the command * executes). */ static int mthca_cmd_imm(struct mthca_dev *dev, u64 in_param, u64 *out_param, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS) return mthca_cmd_wait(dev, in_param, out_param, 1, in_modifier, op_modifier, op, timeout); else return mthca_cmd_poll(dev, in_param, out_param, 1, in_modifier, op_modifier, op, timeout); } int mthca_cmd_init(struct mthca_dev *dev) { mutex_init(&dev->cmd.hcr_mutex); sema_init(&dev->cmd.poll_sem, 1); dev->cmd.flags = 0; dev->hcr = ioremap(pci_resource_start(dev->pdev, 0) + MTHCA_HCR_BASE, MTHCA_HCR_SIZE); if (!dev->hcr) { mthca_err(dev, "Couldn't map command register."); return -ENOMEM; } dev->cmd.pool = dma_pool_create("mthca_cmd", &dev->pdev->dev, MTHCA_MAILBOX_SIZE, MTHCA_MAILBOX_SIZE, 0); if (!dev->cmd.pool) { iounmap(dev->hcr); return -ENOMEM; } return 0; } void mthca_cmd_cleanup(struct mthca_dev *dev) { dma_pool_destroy(dev->cmd.pool); iounmap(dev->hcr); if (dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS) iounmap(dev->cmd.dbell_map); } /* * Switch to using events to issue FW commands (should be called after * event queue to command events has been initialized). 
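* A command context is taken from the free list for each in-flight command; its token is advanced by token_mask + 1 on every reuse, so the low bits still index the context array while the high bits let mthca_cmd_event() drop a stale completion from a command that already timed out.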
*/ int mthca_cmd_use_events(struct mthca_dev *dev) { int i; dev->cmd.context = kmalloc_array(dev->cmd.max_cmds, sizeof(struct mthca_cmd_context), GFP_KERNEL); if (!dev->cmd.context) return -ENOMEM; for (i = 0; i < dev->cmd.max_cmds; ++i) { dev->cmd.context[i].token = i; dev->cmd.context[i].next = i + 1; } dev->cmd.context[dev->cmd.max_cmds - 1].next = -1; dev->cmd.free_head = 0; sema_init(&dev->cmd.event_sem, dev->cmd.max_cmds); spin_lock_init(&dev->cmd.context_lock); for (dev->cmd.token_mask = 1; dev->cmd.token_mask < dev->cmd.max_cmds; dev->cmd.token_mask <<= 1) ; /* nothing */ --dev->cmd.token_mask; dev->cmd.flags |= MTHCA_CMD_USE_EVENTS; down(&dev->cmd.poll_sem); return 0; } /* * Switch back to polling (used when shutting down the device) */ void mthca_cmd_use_polling(struct mthca_dev *dev) { int i; dev->cmd.flags &= ~MTHCA_CMD_USE_EVENTS; for (i = 0; i < dev->cmd.max_cmds; ++i) down(&dev->cmd.event_sem); kfree(dev->cmd.context); up(&dev->cmd.poll_sem); } struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, gfp_t gfp_mask) { struct mthca_mailbox *mailbox; mailbox = kmalloc(sizeof *mailbox, gfp_mask); if (!mailbox) return ERR_PTR(-ENOMEM); mailbox->buf = dma_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma); if (!mailbox->buf) { kfree(mailbox); return ERR_PTR(-ENOMEM); } return mailbox; } void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox) { if (!mailbox) return; dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); kfree(mailbox); } int mthca_SYS_EN(struct mthca_dev *dev) { u64 out; int ret; ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D); if (ret == -ENOMEM) mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, " "sladdr=%d, SPD source=%s\n", (int) (out >> 6) & 0xf, (int) (out >> 4) & 3, (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM"); return ret; } int mthca_SYS_DIS(struct mthca_dev *dev) { return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C); } static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, u64 virt) { struct mthca_mailbox *mailbox; struct mthca_icm_iter iter; __be64 *pages; int lg; int nent = 0; int i; int err = 0; int ts = 0, tc = 0; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE); pages = mailbox->buf; for (mthca_icm_first(icm, &iter); !mthca_icm_last(&iter); mthca_icm_next(&iter)) { /* * We have to pass pages that are aligned to their * size, so find the least significant 1 in the * address or size and use that as our log2 size. 
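* (ffs(addr | size) - 1 is the largest power-of-two alignment shared by the chunk's address and length, so every 2^lg-byte page passed to FW is naturally aligned; e.g. addr 0x6000 and size 0x4000 give lg = 13, i.e. two 8 KB pages.)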
*/ lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1; if (lg < MTHCA_ICM_PAGE_SHIFT) { mthca_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", MTHCA_ICM_PAGE_SIZE, (unsigned long long) mthca_icm_addr(&iter), mthca_icm_size(&iter)); err = -EINVAL; goto out; } for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) { if (virt != -1) { pages[nent * 2] = cpu_to_be64(virt); virt += 1ULL << lg; } pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) | (lg - MTHCA_ICM_PAGE_SHIFT)); ts += 1 << (lg - 10); ++tc; if (++nent == MTHCA_MAILBOX_SIZE / 16) { err = mthca_cmd(dev, mailbox->dma, nent, 0, op, CMD_TIME_CLASS_B); if (err) goto out; nent = 0; } } } if (nent) err = mthca_cmd(dev, mailbox->dma, nent, 0, op, CMD_TIME_CLASS_B); switch (op) { case CMD_MAP_FA: mthca_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); break; case CMD_MAP_ICM_AUX: mthca_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); break; case CMD_MAP_ICM: mthca_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", tc, ts, (unsigned long long) virt - (ts << 10)); break; } out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm) { return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1); } int mthca_UNMAP_FA(struct mthca_dev *dev) { return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B); } int mthca_RUN_FW(struct mthca_dev *dev) { return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A); } static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base) { phys_addr_t addr; u16 max_off = 0; int i; for (i = 0; i < 8; ++i) max_off = max(max_off, dev->cmd.dbell_offsets[i]); if ((base & PAGE_MASK) != ((base + max_off) & PAGE_MASK)) { mthca_warn(dev, "Firmware doorbell region at 0x%016llx, " "length 0x%x crosses a page boundary\n", (unsigned long long) base, max_off); return; } addr = pci_resource_start(dev->pdev, 2) + ((pci_resource_len(dev->pdev, 2) - 1) & base); dev->cmd.dbell_map = ioremap(addr, max_off + sizeof(u32)); if (!dev->cmd.dbell_map) return; dev->cmd.flags |= MTHCA_CMD_POST_DOORBELLS; mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n"); } int mthca_QUERY_FW(struct mthca_dev *dev) { struct mthca_mailbox *mailbox; u32 *outbox; u64 base; u32 tmp; int err = 0; u8 lg; int i; #define QUERY_FW_OUT_SIZE 0x100 #define QUERY_FW_VER_OFFSET 0x00 #define QUERY_FW_MAX_CMD_OFFSET 0x0f #define QUERY_FW_ERR_START_OFFSET 0x30 #define QUERY_FW_ERR_SIZE_OFFSET 0x38 #define QUERY_FW_CMD_DB_EN_OFFSET 0x10 #define QUERY_FW_CMD_DB_OFFSET 0x50 #define QUERY_FW_CMD_DB_BASE 0x60 #define QUERY_FW_START_OFFSET 0x20 #define QUERY_FW_END_OFFSET 0x28 #define QUERY_FW_SIZE_OFFSET 0x00 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 #define QUERY_FW_EQ_ARM_BASE_OFFSET 0x40 #define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW, CMD_TIME_CLASS_A); if (err) goto out; MTHCA_GET(dev->fw_ver, outbox, QUERY_FW_VER_OFFSET); /* * FW subminor version is at more significant bits than minor * version, so swap here. 
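* (For example, a raw value of 0x0005000100a4 -- major 5, subminor 1, minor 0xa4 -- becomes 0x000500a40001, so the %012llx printed below reads major.minor.subminor in the conventional order.)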
*/ dev->fw_ver = (dev->fw_ver & 0xffff00000000ull) | ((dev->fw_ver & 0xffff0000ull) >> 16) | ((dev->fw_ver & 0x0000ffffull) << 16); MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); dev->cmd.max_cmds = 1 << lg; mthca_dbg(dev, "FW version %012llx, max commands %d\n", (unsigned long long) dev->fw_ver, dev->cmd.max_cmds); MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET); MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET); mthca_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x\n", (unsigned long long) dev->catas_err.addr, dev->catas_err.size); MTHCA_GET(tmp, outbox, QUERY_FW_CMD_DB_EN_OFFSET); if (tmp & 0x1) { mthca_dbg(dev, "FW supports commands through doorbells\n"); MTHCA_GET(base, outbox, QUERY_FW_CMD_DB_BASE); for (i = 0; i < MTHCA_CMD_NUM_DBELL_DWORDS; ++i) MTHCA_GET(dev->cmd.dbell_offsets[i], outbox, QUERY_FW_CMD_DB_OFFSET + (i << 1)); mthca_setup_cmd_doorbells(dev, base); } if (mthca_is_memfree(dev)) { MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET); MTHCA_GET(dev->fw.arbel.clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); MTHCA_GET(dev->fw.arbel.eq_arm_base, outbox, QUERY_FW_EQ_ARM_BASE_OFFSET); MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET); mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2); /* * Round up number of system pages needed in case * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE. */ dev->fw.arbel.fw_pages = ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >> (PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT); mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n", (unsigned long long) dev->fw.arbel.clr_int_base, (unsigned long long) dev->fw.arbel.eq_arm_base, (unsigned long long) dev->fw.arbel.eq_set_ci_base); } else { MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET); MTHCA_GET(dev->fw.tavor.fw_end, outbox, QUERY_FW_END_OFFSET); mthca_dbg(dev, "FW size %d KB (start %llx, end %llx)\n", (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10), (unsigned long long) dev->fw.tavor.fw_start, (unsigned long long) dev->fw.tavor.fw_end); } out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_ENABLE_LAM(struct mthca_dev *dev) { struct mthca_mailbox *mailbox; u8 info; u32 *outbox; int err = 0; #define ENABLE_LAM_OUT_SIZE 0x100 #define ENABLE_LAM_START_OFFSET 0x00 #define ENABLE_LAM_END_OFFSET 0x08 #define ENABLE_LAM_INFO_OFFSET 0x13 #define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4) #define ENABLE_LAM_INFO_ECC_MASK 0x3 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM, CMD_TIME_CLASS_C); if (err) goto out; MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET); MTHCA_GET(dev->ddr_end, outbox, ENABLE_LAM_END_OFFSET); MTHCA_GET(info, outbox, ENABLE_LAM_INFO_OFFSET); if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) != !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { mthca_info(dev, "FW reports that HCA-attached memory " "is %s hidden; does not match PCI config\n", (info & ENABLE_LAM_INFO_HIDDEN_FLAG) ? 
"" : "not"); } if (info & ENABLE_LAM_INFO_HIDDEN_FLAG) mthca_dbg(dev, "HCA-attached memory is hidden.\n"); mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n", (int) ((dev->ddr_end - dev->ddr_start) >> 10), (unsigned long long) dev->ddr_start, (unsigned long long) dev->ddr_end); out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_DISABLE_LAM(struct mthca_dev *dev) { return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C); } int mthca_QUERY_DDR(struct mthca_dev *dev) { struct mthca_mailbox *mailbox; u8 info; u32 *outbox; int err = 0; #define QUERY_DDR_OUT_SIZE 0x100 #define QUERY_DDR_START_OFFSET 0x00 #define QUERY_DDR_END_OFFSET 0x08 #define QUERY_DDR_INFO_OFFSET 0x13 #define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4) #define QUERY_DDR_INFO_ECC_MASK 0x3 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR, CMD_TIME_CLASS_A); if (err) goto out; MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET); MTHCA_GET(dev->ddr_end, outbox, QUERY_DDR_END_OFFSET); MTHCA_GET(info, outbox, QUERY_DDR_INFO_OFFSET); if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) != !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { mthca_info(dev, "FW reports that HCA-attached memory " "is %s hidden; does not match PCI config\n", (info & QUERY_DDR_INFO_HIDDEN_FLAG) ? "" : "not"); } if (info & QUERY_DDR_INFO_HIDDEN_FLAG) mthca_dbg(dev, "HCA-attached memory is hidden.\n"); mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n", (int) ((dev->ddr_end - dev->ddr_start) >> 10), (unsigned long long) dev->ddr_start, (unsigned long long) dev->ddr_end); out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, struct mthca_dev_lim *dev_lim) { struct mthca_mailbox *mailbox; u32 *outbox; u8 field; u16 size; u16 stat_rate; int err; #define QUERY_DEV_LIM_OUT_SIZE 0x100 #define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET 0x10 #define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET 0x11 #define QUERY_DEV_LIM_RSVD_QP_OFFSET 0x12 #define QUERY_DEV_LIM_MAX_QP_OFFSET 0x13 #define QUERY_DEV_LIM_RSVD_SRQ_OFFSET 0x14 #define QUERY_DEV_LIM_MAX_SRQ_OFFSET 0x15 #define QUERY_DEV_LIM_RSVD_EEC_OFFSET 0x16 #define QUERY_DEV_LIM_MAX_EEC_OFFSET 0x17 #define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET 0x19 #define QUERY_DEV_LIM_RSVD_CQ_OFFSET 0x1a #define QUERY_DEV_LIM_MAX_CQ_OFFSET 0x1b #define QUERY_DEV_LIM_MAX_MPT_OFFSET 0x1d #define QUERY_DEV_LIM_RSVD_EQ_OFFSET 0x1e #define QUERY_DEV_LIM_MAX_EQ_OFFSET 0x1f #define QUERY_DEV_LIM_RSVD_MTT_OFFSET 0x20 #define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET 0x21 #define QUERY_DEV_LIM_RSVD_MRW_OFFSET 0x22 #define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET 0x23 #define QUERY_DEV_LIM_MAX_AV_OFFSET 0x27 #define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET 0x29 #define QUERY_DEV_LIM_MAX_RES_QP_OFFSET 0x2b #define QUERY_DEV_LIM_MAX_RDMA_OFFSET 0x2f #define QUERY_DEV_LIM_RSZ_SRQ_OFFSET 0x33 #define QUERY_DEV_LIM_ACK_DELAY_OFFSET 0x35 #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET 0x36 #define QUERY_DEV_LIM_VL_PORT_OFFSET 0x37 #define QUERY_DEV_LIM_MAX_GID_OFFSET 0x3b #define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET 0x3c #define QUERY_DEV_LIM_MAX_PKEY_OFFSET 0x3f #define QUERY_DEV_LIM_FLAGS_OFFSET 0x44 #define QUERY_DEV_LIM_RSVD_UAR_OFFSET 0x48 #define QUERY_DEV_LIM_UAR_SZ_OFFSET 0x49 #define QUERY_DEV_LIM_PAGE_SZ_OFFSET 0x4b #define QUERY_DEV_LIM_MAX_SG_OFFSET 0x51 #define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET 0x52 #define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET 0x55 #define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56 #define 
QUERY_DEV_LIM_MAX_QP_MCG_OFFSET 0x61 #define QUERY_DEV_LIM_RSVD_MCG_OFFSET 0x62 #define QUERY_DEV_LIM_MAX_MCG_OFFSET 0x63 #define QUERY_DEV_LIM_RSVD_PD_OFFSET 0x64 #define QUERY_DEV_LIM_MAX_PD_OFFSET 0x65 #define QUERY_DEV_LIM_RSVD_RDD_OFFSET 0x66 #define QUERY_DEV_LIM_MAX_RDD_OFFSET 0x67 #define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET 0x84 #define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET 0x86 #define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET 0x88 #define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET 0x8a #define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET 0x8c #define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET 0x8e #define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET 0x90 #define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET 0x92 #define QUERY_DEV_LIM_PBL_SZ_OFFSET 0x96 #define QUERY_DEV_LIM_BMME_FLAGS_OFFSET 0x97 #define QUERY_DEV_LIM_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_LIM_LAMR_OFFSET 0x9f #define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM, CMD_TIME_CLASS_A); if (err) goto out; MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET); dev_lim->reserved_qps = 1 << (field & 0xf); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET); dev_lim->max_qps = 1 << (field & 0x1f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET); dev_lim->reserved_srqs = 1 << (field >> 4); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET); dev_lim->max_srqs = 1 << (field & 0x1f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET); dev_lim->reserved_eecs = 1 << (field & 0xf); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET); dev_lim->max_eecs = 1 << (field & 0x1f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET); dev_lim->max_cq_sz = 1 << field; MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET); dev_lim->reserved_cqs = 1 << (field & 0xf); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET); dev_lim->max_cqs = 1 << (field & 0x1f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET); dev_lim->max_mpts = 1 << (field & 0x3f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET); dev_lim->reserved_eqs = 1 << (field & 0xf); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET); dev_lim->max_eqs = 1 << (field & 0x7); MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET); if (mthca_is_memfree(dev)) dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64), dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size; else dev_lim->reserved_mtts = 1 << (field >> 4); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET); dev_lim->max_mrw_sz = 1 << field; MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET); dev_lim->reserved_mrws = 1 << (field & 0xf); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET); dev_lim->max_mtt_seg = 1 << (field & 0x3f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET); dev_lim->max_requester_per_qp = 1 << (field & 0x3f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET); dev_lim->max_responder_per_qp = 1 << (field & 0x3f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET); dev_lim->max_rdma_global = 1 << (field & 0x3f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET); dev_lim->local_ca_ack_delay = field & 0x1f; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET); dev_lim->max_mtu = field >> 4; dev_lim->max_port_width = field & 0xf; MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET); dev_lim->max_vl = 
field >> 4; dev_lim->num_ports = field & 0xf; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET); dev_lim->max_gids = 1 << (field & 0xf); MTHCA_GET(stat_rate, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET); dev_lim->stat_rate_support = stat_rate; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET); dev_lim->max_pkeys = 1 << (field & 0xf); MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET); MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET); dev_lim->reserved_uars = field >> 4; MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET); dev_lim->uar_size = 1 << ((field & 0x3f) + 20); MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET); dev_lim->min_page_sz = 1 << field; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET); dev_lim->max_sg = field; MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET); dev_lim->max_desc_sz = size; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET); dev_lim->max_qp_per_mcg = 1 << field; MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET); dev_lim->reserved_mgms = field & 0xf; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET); dev_lim->max_mcgs = 1 << field; MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET); dev_lim->reserved_pds = field >> 4; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET); dev_lim->max_pds = 1 << (field & 0x3f); MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET); dev_lim->reserved_rdds = field >> 4; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET); dev_lim->max_rdds = 1 << (field & 0x3f); MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET); dev_lim->eec_entry_sz = size; MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET); dev_lim->qpc_entry_sz = size; MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET); dev_lim->eeec_entry_sz = size; MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET); dev_lim->eqpc_entry_sz = size; MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET); dev_lim->eqc_entry_sz = size; MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET); dev_lim->cqc_entry_sz = size; MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET); dev_lim->srq_entry_sz = size; MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET); dev_lim->uar_scratch_entry_sz = size; if (mthca_is_memfree(dev)) { MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); dev_lim->max_srq_sz = 1 << field; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET); dev_lim->max_qp_sz = 1 << field; MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET); dev_lim->hca.arbel.resize_srq = field & 1; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET); dev_lim->max_sg = min_t(int, field, dev_lim->max_sg); MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET); dev_lim->max_desc_sz = min_t(int, size, dev_lim->max_desc_sz); MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET); dev_lim->mpt_entry_sz = size; MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET); dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f); MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox, QUERY_DEV_LIM_BMME_FLAGS_OFFSET); MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox, QUERY_DEV_LIM_RSVD_LKEY_OFFSET); MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET); dev_lim->hca.arbel.lam_required = field & 1; MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox, QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET); if (dev_lim->hca.arbel.bmme_flags & 1) mthca_dbg(dev, "Base MM extensions: yes " "(flags %d, max PBL %d, rsvd L_Key %08x)\n", dev_lim->hca.arbel.bmme_flags, dev_lim->hca.arbel.max_pbl_sz, 
dev_lim->hca.arbel.reserved_lkey); else mthca_dbg(dev, "Base MM extensions: no\n"); mthca_dbg(dev, "Max ICM size %lld MB\n", (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20); } else { MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); dev_lim->max_srq_sz = (1 << field) - 1; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET); dev_lim->max_qp_sz = (1 << field) - 1; MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET); dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f); dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE; } mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz); mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", dev_lim->reserved_mrws, dev_lim->reserved_mtts); mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars); mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", dev_lim->max_pds, dev_lim->reserved_mgms); mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz); mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags); out: mthca_free_mailbox(dev, mailbox); return err; } static void get_board_id(void *vsd, char *board_id) { int i; #define VSD_OFFSET_SIG1 0x00 #define VSD_OFFSET_SIG2 0xde #define VSD_OFFSET_MLX_BOARD_ID 0xd0 #define VSD_OFFSET_TS_BOARD_ID 0x20 #define VSD_SIGNATURE_TOPSPIN 0x5ad memset(board_id, 0, MTHCA_BOARD_ID_LEN); if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN); } else { /* * The board ID is a string but the firmware byte * swaps each 4-byte word before passing it back to * us. Therefore we need to swab it before printing. 
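* (Only the first four 32-bit words -- 16 bytes -- of the Mellanox board ID are byte-swapped back here with swab32(); the Topspin branch above copies the string as-is.)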
*/ for (i = 0; i < 4; ++i) ((u32 *) board_id)[i] = swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); } } int mthca_QUERY_ADAPTER(struct mthca_dev *dev, struct mthca_adapter *adapter) { struct mthca_mailbox *mailbox; u32 *outbox; int err; #define QUERY_ADAPTER_OUT_SIZE 0x100 #define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00 #define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 #define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 #define QUERY_ADAPTER_VSD_OFFSET 0x20 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); outbox = mailbox->buf; err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER, CMD_TIME_CLASS_A); if (err) goto out; if (!mthca_is_memfree(dev)) { MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); } MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, adapter->board_id); out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_INIT_HCA(struct mthca_dev *dev, struct mthca_init_hca_param *param) { struct mthca_mailbox *mailbox; __be32 *inbox; int err; #define INIT_HCA_IN_SIZE 0x200 #define INIT_HCA_FLAGS1_OFFSET 0x00c #define INIT_HCA_FLAGS2_OFFSET 0x014 #define INIT_HCA_QPC_OFFSET 0x020 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) #define INIT_HCA_EEC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x20) #define INIT_HCA_LOG_EEC_OFFSET (INIT_HCA_QPC_OFFSET + 0x27) #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) #define INIT_HCA_EQPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) #define INIT_HCA_EEEC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) #define INIT_HCA_RDB_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) #define INIT_HCA_UDAV_OFFSET 0x0b0 #define INIT_HCA_UDAV_LKEY_OFFSET (INIT_HCA_UDAV_OFFSET + 0x0) #define INIT_HCA_UDAV_PD_OFFSET (INIT_HCA_UDAV_OFFSET + 0x4) #define INIT_HCA_MCAST_OFFSET 0x0c0 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) #define INIT_HCA_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) #define INIT_HCA_TPT_OFFSET 0x0f0 #define INIT_HCA_MPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) #define INIT_HCA_MTT_SEG_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x09) #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) #define INIT_HCA_UAR_OFFSET 0x120 #define INIT_HCA_UAR_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x00) #define INIT_HCA_UARC_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x09) #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) #define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10) #define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18) mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; memset(inbox, 0, 
INIT_HCA_IN_SIZE); if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) MTHCA_PUT(inbox, 0x1, INIT_HCA_FLAGS1_OFFSET); #if defined(__LITTLE_ENDIAN) *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) &= ~cpu_to_be32(1 << 1); #elif defined(__BIG_ENDIAN) *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1 << 1); #else #error Host endianness not defined #endif /* Check port for UD address vector: */ *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1); /* Enable IPoIB checksumming if we can: */ if (dev->device_cap_flags & IB_DEVICE_UD_IP_CSUM) *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(7 << 3); /* We leave wqe_quota, responder_exu, etc as 0 (default) */ /* QPC/EEC/CQC/EQC/RDB attributes */ MTHCA_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); MTHCA_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); MTHCA_PUT(inbox, param->eec_base, INIT_HCA_EEC_BASE_OFFSET); MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET); MTHCA_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); MTHCA_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); MTHCA_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); MTHCA_PUT(inbox, param->eqpc_base, INIT_HCA_EQPC_BASE_OFFSET); MTHCA_PUT(inbox, param->eeec_base, INIT_HCA_EEEC_BASE_OFFSET); MTHCA_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); MTHCA_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); MTHCA_PUT(inbox, param->rdb_base, INIT_HCA_RDB_BASE_OFFSET); /* UD AV attributes */ /* multicast attributes */ MTHCA_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); MTHCA_PUT(inbox, param->mc_hash_sz, INIT_HCA_MC_HASH_SZ_OFFSET); MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); /* TPT attributes */ MTHCA_PUT(inbox, param->mpt_base, INIT_HCA_MPT_BASE_OFFSET); if (!mthca_is_memfree(dev)) MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET); MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); MTHCA_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); /* UAR attributes */ { u8 uar_page_sz = PAGE_SHIFT - 12; MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); } MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET); if (mthca_is_memfree(dev)) { MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET); MTHCA_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET); } err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, CMD_TIME_CLASS_D); mthca_free_mailbox(dev, mailbox); return err; } int mthca_INIT_IB(struct mthca_dev *dev, struct mthca_init_ib_param *param, int port) { struct mthca_mailbox *mailbox; u32 *inbox; int err; u32 flags; #define INIT_IB_IN_SIZE 56 #define INIT_IB_FLAGS_OFFSET 0x00 #define INIT_IB_FLAG_SIG (1 << 18) #define INIT_IB_FLAG_NG (1 << 17) #define INIT_IB_FLAG_G0 (1 << 16) #define INIT_IB_VL_SHIFT 4 #define INIT_IB_PORT_WIDTH_SHIFT 8 #define INIT_IB_MTU_SHIFT 12 #define INIT_IB_MAX_GID_OFFSET 0x06 #define INIT_IB_MAX_PKEY_OFFSET 0x0a #define INIT_IB_GUID0_OFFSET 0x10 #define INIT_IB_NODE_GUID_OFFSET 0x18 #define INIT_IB_SI_GUID_OFFSET 0x20 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; memset(inbox, 0, INIT_IB_IN_SIZE); flags = 0; flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0; flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0; flags |= param->set_si_guid ? 
INIT_IB_FLAG_SIG : 0; flags |= param->vl_cap << INIT_IB_VL_SHIFT; flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT; flags |= param->mtu_cap << INIT_IB_MTU_SHIFT; MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET); MTHCA_PUT(inbox, param->gid_cap, INIT_IB_MAX_GID_OFFSET); MTHCA_PUT(inbox, param->pkey_cap, INIT_IB_MAX_PKEY_OFFSET); MTHCA_PUT(inbox, param->guid0, INIT_IB_GUID0_OFFSET); MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET); MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET); err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB, CMD_TIME_CLASS_A); mthca_free_mailbox(dev, mailbox); return err; } int mthca_CLOSE_IB(struct mthca_dev *dev, int port) { return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A); } int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic) { return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C); } int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, int port) { struct mthca_mailbox *mailbox; u32 *inbox; int err; u32 flags = 0; #define SET_IB_IN_SIZE 0x40 #define SET_IB_FLAGS_OFFSET 0x00 #define SET_IB_FLAG_SIG (1 << 18) #define SET_IB_FLAG_RQK (1 << 0) #define SET_IB_CAP_MASK_OFFSET 0x04 #define SET_IB_SI_GUID_OFFSET 0x08 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; memset(inbox, 0, SET_IB_IN_SIZE); flags |= param->set_si_guid ? SET_IB_FLAG_SIG : 0; flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0; MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET); MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET); MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET); err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB, CMD_TIME_CLASS_B); mthca_free_mailbox(dev, mailbox); return err; } int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt) { return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt); } int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt) { struct mthca_mailbox *mailbox; __be64 *inbox; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; inbox[0] = cpu_to_be64(virt); inbox[1] = cpu_to_be64(dma_addr); err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM, CMD_TIME_CLASS_B); mthca_free_mailbox(dev, mailbox); if (!err) mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n", (unsigned long long) dma_addr, (unsigned long long) virt); return err; } int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count) { mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n", page_count, (unsigned long long) virt); return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B); } int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm) { return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1); } int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev) { return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B); } int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages) { int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE, CMD_TIME_CLASS_A); if (ret) return ret; /* * Round up number of system pages needed in case * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE. 
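* (FW counts 4 KB ICM pages; ALIGN() rounds that count up to a multiple of PAGE_SIZE / 4 KB and the shift then converts it into whole system pages, so nothing is lost when PAGE_SIZE is, say, 64 KB.)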
*/ *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >> (PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT); return 0; } int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int mpt_index) { return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT, CMD_TIME_CLASS_B); } int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int mpt_index) { return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, !mailbox, CMD_HW2SW_MPT, CMD_TIME_CLASS_B); } int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int num_mtt) { return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT, CMD_TIME_CLASS_B); } int mthca_SYNC_TPT(struct mthca_dev *dev) { return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B); } int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, int eq_num) { mthca_dbg(dev, "%s mask %016llx for eqn %d\n", unmap ? "Clearing" : "Setting", (unsigned long long) event_mask, eq_num); return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num, 0, CMD_MAP_EQ, CMD_TIME_CLASS_B); } int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int eq_num) { return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ, CMD_TIME_CLASS_A); } int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int eq_num) { return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0, CMD_HW2SW_EQ, CMD_TIME_CLASS_A); } int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num) { return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ, CMD_TIME_CLASS_A); } int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num) { return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0, CMD_HW2SW_CQ, CMD_TIME_CLASS_A); } int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size) { struct mthca_mailbox *mailbox; __be32 *inbox; int err; #define RESIZE_CQ_IN_SIZE 0x40 #define RESIZE_CQ_LOG_SIZE_OFFSET 0x0c #define RESIZE_CQ_LKEY_OFFSET 0x1c mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; memset(inbox, 0, RESIZE_CQ_IN_SIZE); /* * Leave start address fields zeroed out -- mthca assumes that * MRs for CQs always start at virtual address 0. 
*/ MTHCA_PUT(inbox, log_size, RESIZE_CQ_LOG_SIZE_OFFSET); MTHCA_PUT(inbox, lkey, RESIZE_CQ_LKEY_OFFSET); err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ, CMD_TIME_CLASS_B); mthca_free_mailbox(dev, mailbox); return err; } int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int srq_num) { return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ, CMD_TIME_CLASS_A); } int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int srq_num) { return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0, CMD_HW2SW_SRQ, CMD_TIME_CLASS_A); } int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num, struct mthca_mailbox *mailbox) { return mthca_cmd_box(dev, 0, mailbox->dma, num, 0, CMD_QUERY_SRQ, CMD_TIME_CLASS_A); } int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit) { return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ, CMD_TIME_CLASS_B); } int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur, enum ib_qp_state next, u32 num, int is_ee, struct mthca_mailbox *mailbox, u32 optmask) { static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = CMD_ERR2RST_QPEE, [IB_QPS_ERR] = CMD_2ERR_QPEE, [IB_QPS_INIT] = CMD_RST2INIT_QPEE, }, [IB_QPS_INIT] = { [IB_QPS_RESET] = CMD_ERR2RST_QPEE, [IB_QPS_ERR] = CMD_2ERR_QPEE, [IB_QPS_INIT] = CMD_INIT2INIT_QPEE, [IB_QPS_RTR] = CMD_INIT2RTR_QPEE, }, [IB_QPS_RTR] = { [IB_QPS_RESET] = CMD_ERR2RST_QPEE, [IB_QPS_ERR] = CMD_2ERR_QPEE, [IB_QPS_RTS] = CMD_RTR2RTS_QPEE, }, [IB_QPS_RTS] = { [IB_QPS_RESET] = CMD_ERR2RST_QPEE, [IB_QPS_ERR] = CMD_2ERR_QPEE, [IB_QPS_RTS] = CMD_RTS2RTS_QPEE, [IB_QPS_SQD] = CMD_RTS2SQD_QPEE, }, [IB_QPS_SQD] = { [IB_QPS_RESET] = CMD_ERR2RST_QPEE, [IB_QPS_ERR] = CMD_2ERR_QPEE, [IB_QPS_RTS] = CMD_SQD2RTS_QPEE, [IB_QPS_SQD] = CMD_SQD2SQD_QPEE, }, [IB_QPS_SQE] = { [IB_QPS_RESET] = CMD_ERR2RST_QPEE, [IB_QPS_ERR] = CMD_2ERR_QPEE, [IB_QPS_RTS] = CMD_SQERR2RTS_QPEE, }, [IB_QPS_ERR] = { [IB_QPS_RESET] = CMD_ERR2RST_QPEE, [IB_QPS_ERR] = CMD_2ERR_QPEE, } }; u8 op_mod = 0; int my_mailbox = 0; int err; if (op[cur][next] == CMD_ERR2RST_QPEE) { op_mod = 3; /* don't write outbox, any->reset */ /* For debugging */ if (!mailbox) { mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (!IS_ERR(mailbox)) { my_mailbox = 1; op_mod = 2; /* write outbox, any->reset */ } else mailbox = NULL; } err = mthca_cmd_box(dev, 0, mailbox ? 
mailbox->dma : 0, (!!is_ee << 24) | num, op_mod, op[cur][next], CMD_TIME_CLASS_C); if (0 && mailbox) { int i; mthca_dbg(dev, "Dumping QP context:\n"); printk(" %08x\n", be32_to_cpup(mailbox->buf)); for (i = 0; i < 0x100 / 4; ++i) { if (i % 8 == 0) printk("[%02x] ", i * 4); printk(" %08x", be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); if ((i + 1) % 8 == 0) printk("\n"); } } if (my_mailbox) mthca_free_mailbox(dev, mailbox); } else { if (0) { int i; mthca_dbg(dev, "Dumping QP context:\n"); printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf)); for (i = 0; i < 0x100 / 4; ++i) { if (i % 8 == 0) printk(" [%02x] ", i * 4); printk(" %08x", be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); if ((i + 1) % 8 == 0) printk("\n"); } } err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num, op_mod, op[cur][next], CMD_TIME_CLASS_C); } return err; } int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, struct mthca_mailbox *mailbox) { return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0, CMD_QUERY_QPEE, CMD_TIME_CLASS_A); } int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn) { u8 op_mod; switch (type) { case IB_QPT_SMI: op_mod = 0; break; case IB_QPT_GSI: op_mod = 1; break; case IB_QPT_RAW_IPV6: op_mod = 2; break; case IB_QPT_RAW_ETHERTYPE: op_mod = 3; break; default: return -EINVAL; } return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP, CMD_TIME_CLASS_B); } int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const void *in_mad, void *response_mad) { struct mthca_mailbox *inmailbox, *outmailbox; void *inbox; int err; u32 in_modifier = port; u8 op_modifier = 0; #define MAD_IFC_BOX_SIZE 0x400 #define MAD_IFC_MY_QPN_OFFSET 0x100 #define MAD_IFC_RQPN_OFFSET 0x108 #define MAD_IFC_SL_OFFSET 0x10c #define MAD_IFC_G_PATH_OFFSET 0x10d #define MAD_IFC_RLID_OFFSET 0x10e #define MAD_IFC_PKEY_OFFSET 0x112 #define MAD_IFC_GRH_OFFSET 0x140 inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(inmailbox)) return PTR_ERR(inmailbox); inbox = inmailbox->buf; outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(outmailbox)) { mthca_free_mailbox(dev, inmailbox); return PTR_ERR(outmailbox); } memcpy(inbox, in_mad, 256); /* * Key check traps can't be generated unless we have in_wc to * tell us where to send the trap. */ if (ignore_mkey || !in_wc) op_modifier |= 0x1; if (ignore_bkey || !in_wc) op_modifier |= 0x2; if (in_wc) { u8 val; memset(inbox + 256, 0, 256); MTHCA_PUT(inbox, in_wc->qp->qp_num, MAD_IFC_MY_QPN_OFFSET); MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); val = in_wc->sl << 4; MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET); val = in_wc->dlid_path_bits | (in_wc->wc_flags & IB_WC_GRH ? 
0x80 : 0); MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET); MTHCA_PUT(inbox, ib_lid_cpu16(in_wc->slid), MAD_IFC_RLID_OFFSET); MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); if (in_grh) memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40); op_modifier |= 0x4; in_modifier |= ib_lid_cpu16(in_wc->slid) << 16; } err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma, in_modifier, op_modifier, CMD_MAD_IFC, CMD_TIME_CLASS_C); if (!err) memcpy(response_mad, outmailbox->buf, 256); mthca_free_mailbox(dev, inmailbox); mthca_free_mailbox(dev, outmailbox); return err; } int mthca_READ_MGM(struct mthca_dev *dev, int index, struct mthca_mailbox *mailbox) { return mthca_cmd_box(dev, 0, mailbox->dma, index, 0, CMD_READ_MGM, CMD_TIME_CLASS_A); } int mthca_WRITE_MGM(struct mthca_dev *dev, int index, struct mthca_mailbox *mailbox) { return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM, CMD_TIME_CLASS_A); } int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, u16 *hash) { u64 imm; int err; err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH, CMD_TIME_CLASS_A); *hash = imm; return err; } int mthca_NOP(struct mthca_dev *dev) { return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100)); }
linux-master
drivers/infiniband/hw/mthca/mthca_cmd.c
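The core of mthca_MODIFY_QP() in the file above is a two-dimensional lookup table, indexed by (current state, next state), that maps each driven queue-pair transition to a firmware opcode. The standalone sketch below reproduces that designated-initializer table pattern in plain C; the state names and opcode values are illustrative placeholders, not the driver's real ib_qp_state and CMD_*_QPEE constants.

#include <stdio.h>

enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_ERR, QPS_NUM };

/* Placeholder opcodes; the driver uses CMD_RST2INIT_QPEE and friends. */
static const unsigned short op[QPS_NUM][QPS_NUM] = {
	[QPS_RESET] = { [QPS_INIT] = 0x19 },
	[QPS_INIT]  = { [QPS_RTR]  = 0x1a },
	[QPS_RTR]   = { [QPS_RTS]  = 0x1b },
	[QPS_RTS]   = { [QPS_RTS]  = 0x1c },
};

int main(void)
{
	enum qp_state cur = QPS_INIT, next = QPS_RTR;

	/* In this sketch, entries omitted from the initializer read back as
	 * zero, so a zero lookup means "no opcode defined for this transition"
	 * (the driver itself keys off CMD_ERR2RST_QPEE instead). */
	if (op[cur][next])
		printf("transition %d -> %d uses opcode 0x%02x\n",
		       cur, next, op[cur][next]);
	else
		printf("transition %d -> %d has no direct opcode\n", cur, next);
	return 0;
}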
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/gfp.h> #include <linux/hardirq.h> #include <linux/sched.h> #include <asm/io.h> #include <rdma/ib_pack.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_memfree.h" enum { MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE }; enum { MTHCA_CQ_ENTRY_SIZE = 0x20 }; enum { MTHCA_ATOMIC_BYTE_LEN = 8 }; /* * Must be packed because start is 64 bits but only aligned to 32 bits. 
*/ struct mthca_cq_context { __be32 flags; __be64 start; __be32 logsize_usrpage; __be32 error_eqn; /* Tavor only */ __be32 comp_eqn; __be32 pd; __be32 lkey; __be32 last_notified_index; __be32 solicit_producer_index; __be32 consumer_index; __be32 producer_index; __be32 cqn; __be32 ci_db; /* Arbel only */ __be32 state_db; /* Arbel only */ u32 reserved; } __packed; #define MTHCA_CQ_STATUS_OK ( 0 << 28) #define MTHCA_CQ_STATUS_OVERFLOW ( 9 << 28) #define MTHCA_CQ_STATUS_WRITE_FAIL (10 << 28) #define MTHCA_CQ_FLAG_TR ( 1 << 18) #define MTHCA_CQ_FLAG_OI ( 1 << 17) #define MTHCA_CQ_STATE_DISARMED ( 0 << 8) #define MTHCA_CQ_STATE_ARMED ( 1 << 8) #define MTHCA_CQ_STATE_ARMED_SOL ( 4 << 8) #define MTHCA_EQ_STATE_FIRED (10 << 8) enum { MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe }; enum { SYNDROME_LOCAL_LENGTH_ERR = 0x01, SYNDROME_LOCAL_QP_OP_ERR = 0x02, SYNDROME_LOCAL_EEC_OP_ERR = 0x03, SYNDROME_LOCAL_PROT_ERR = 0x04, SYNDROME_WR_FLUSH_ERR = 0x05, SYNDROME_MW_BIND_ERR = 0x06, SYNDROME_BAD_RESP_ERR = 0x10, SYNDROME_LOCAL_ACCESS_ERR = 0x11, SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, SYNDROME_REMOTE_ACCESS_ERR = 0x13, SYNDROME_REMOTE_OP_ERR = 0x14, SYNDROME_RETRY_EXC_ERR = 0x15, SYNDROME_RNR_RETRY_EXC_ERR = 0x16, SYNDROME_LOCAL_RDD_VIOL_ERR = 0x20, SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21, SYNDROME_REMOTE_ABORTED_ERR = 0x22, SYNDROME_INVAL_EECN_ERR = 0x23, SYNDROME_INVAL_EEC_STATE_ERR = 0x24 }; struct mthca_cqe { __be32 my_qpn; __be32 my_ee; __be32 rqpn; u8 sl_ipok; u8 g_mlpath; __be16 rlid; __be32 imm_etype_pkey_eec; __be32 byte_cnt; __be32 wqe; u8 opcode; u8 is_send; u8 reserved; u8 owner; }; struct mthca_err_cqe { __be32 my_qpn; u32 reserved1[3]; u8 syndrome; u8 vendor_err; __be16 db_cnt; u32 reserved2; __be32 wqe; u8 opcode; u8 reserved3[2]; u8 owner; }; #define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7) #define MTHCA_CQ_ENTRY_OWNER_HW (1 << 7) #define MTHCA_TAVOR_CQ_DB_INC_CI (1 << 24) #define MTHCA_TAVOR_CQ_DB_REQ_NOT (2 << 24) #define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL (3 << 24) #define MTHCA_TAVOR_CQ_DB_SET_CI (4 << 24) #define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24) #define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL (1 << 24) #define MTHCA_ARBEL_CQ_DB_REQ_NOT (2 << 24) #define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24) static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf, int entry) { if (buf->is_direct) return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE); else return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE; } static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) { return get_cqe_from_buf(&cq->buf, entry); } static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) { return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; } static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) { return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); } static inline void set_cqe_hw(struct mthca_cqe *cqe) { cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; } static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr) { __be32 *cqe = cqe_ptr; (void) cqe; /* avoid warning if mthca_dbg compiled away... */ mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n", be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); } /* * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index * should be correct before calling update_cons_index(). 
*/ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, int incr) { if (mthca_is_memfree(dev)) { *cq->set_ci_db = cpu_to_be32(cq->cons_index); wmb(); } else { mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1, dev->kar + MTHCA_CQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } } void mthca_cq_completion(struct mthca_dev *dev, u32 cqn) { struct mthca_cq *cq; cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); if (!cq) { mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn); return; } ++cq->arm_sn; cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } void mthca_cq_event(struct mthca_dev *dev, u32 cqn, enum ib_event_type event_type) { struct mthca_cq *cq; struct ib_event event; spin_lock(&dev->cq_table.lock); cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); if (cq) ++cq->refcount; spin_unlock(&dev->cq_table.lock); if (!cq) { mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn); return; } event.device = &dev->ib_dev; event.event = event_type; event.element.cq = &cq->ibcq; if (cq->ibcq.event_handler) cq->ibcq.event_handler(&event, cq->ibcq.cq_context); spin_lock(&dev->cq_table.lock); if (!--cq->refcount) wake_up(&cq->wait); spin_unlock(&dev->cq_table.lock); } static inline int is_recv_cqe(struct mthca_cqe *cqe) { if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == MTHCA_ERROR_CQE_OPCODE_MASK) return !(cqe->opcode & 0x01); else return !(cqe->is_send & 0x80); } void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, struct mthca_srq *srq) { struct mthca_cqe *cqe; u32 prod_index; int i, nfreed = 0; spin_lock_irq(&cq->lock); /* * First we need to find the current producer index, so we * know where to start cleaning from. It doesn't matter if HW * adds new entries after this loop -- the QP we're worried * about is already in RESET, so the new entries won't come * from our QP and therefore don't need to be checked. */ for (prod_index = cq->cons_index; cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe)); ++prod_index) if (prod_index == cq->cons_index + cq->ibcq.cqe) break; if (0) mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", qpn, cq->cqn, cq->cons_index, prod_index); /* * Now sweep backwards through the CQ, removing CQ entries * that match our QP by copying older entries on top of them. */ while ((int) --prod_index - (int) cq->cons_index >= 0) { cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); if (cqe->my_qpn == cpu_to_be32(qpn)) { if (srq && is_recv_cqe(cqe)) mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); ++nfreed; } else if (nfreed) memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe), cqe, MTHCA_CQ_ENTRY_SIZE); } if (nfreed) { for (i = 0; i < nfreed; ++i) set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe)); wmb(); cq->cons_index += nfreed; update_cons_index(dev, cq, nfreed); } spin_unlock_irq(&cq->lock); } void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) { int i; /* * In Tavor mode, the hardware keeps the consumer and producer * indices mod the CQ size. Since we might be making the CQ * bigger, we need to deal with the case where the producer * index wrapped around before the CQ was resized. 
*/ if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) && cq->ibcq.cqe < cq->resize_buf->cqe) { cq->cons_index &= cq->ibcq.cqe; if (cqe_sw(get_cqe(cq, cq->ibcq.cqe))) cq->cons_index -= cq->ibcq.cqe + 1; } for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i) memcpy(get_cqe_from_buf(&cq->resize_buf->buf, i & cq->resize_buf->cqe), get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE); } int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent) { int ret; int i; ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE, MTHCA_MAX_DIRECT_CQ_SIZE, &buf->queue, &buf->is_direct, &dev->driver_pd, 1, &buf->mr); if (ret) return ret; for (i = 0; i < nent; ++i) set_cqe_hw(get_cqe_from_buf(buf, i)); return 0; } void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe) { mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue, buf->is_direct, &buf->mr); } static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) { int dbd; __be32 new_wqe; if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { mthca_dbg(dev, "local QP operation err " "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n", be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe), cq->cqn, cq->cons_index); dump_cqe(dev, cqe); } /* * For completions in error, only work request ID, status, vendor error * (and freed resource count for RD) have to be set. */ switch (cqe->syndrome) { case SYNDROME_LOCAL_LENGTH_ERR: entry->status = IB_WC_LOC_LEN_ERR; break; case SYNDROME_LOCAL_QP_OP_ERR: entry->status = IB_WC_LOC_QP_OP_ERR; break; case SYNDROME_LOCAL_EEC_OP_ERR: entry->status = IB_WC_LOC_EEC_OP_ERR; break; case SYNDROME_LOCAL_PROT_ERR: entry->status = IB_WC_LOC_PROT_ERR; break; case SYNDROME_WR_FLUSH_ERR: entry->status = IB_WC_WR_FLUSH_ERR; break; case SYNDROME_MW_BIND_ERR: entry->status = IB_WC_MW_BIND_ERR; break; case SYNDROME_BAD_RESP_ERR: entry->status = IB_WC_BAD_RESP_ERR; break; case SYNDROME_LOCAL_ACCESS_ERR: entry->status = IB_WC_LOC_ACCESS_ERR; break; case SYNDROME_REMOTE_INVAL_REQ_ERR: entry->status = IB_WC_REM_INV_REQ_ERR; break; case SYNDROME_REMOTE_ACCESS_ERR: entry->status = IB_WC_REM_ACCESS_ERR; break; case SYNDROME_REMOTE_OP_ERR: entry->status = IB_WC_REM_OP_ERR; break; case SYNDROME_RETRY_EXC_ERR: entry->status = IB_WC_RETRY_EXC_ERR; break; case SYNDROME_RNR_RETRY_EXC_ERR: entry->status = IB_WC_RNR_RETRY_EXC_ERR; break; case SYNDROME_LOCAL_RDD_VIOL_ERR: entry->status = IB_WC_LOC_RDD_VIOL_ERR; break; case SYNDROME_REMOTE_INVAL_RD_REQ_ERR: entry->status = IB_WC_REM_INV_RD_REQ_ERR; break; case SYNDROME_REMOTE_ABORTED_ERR: entry->status = IB_WC_REM_ABORT_ERR; break; case SYNDROME_INVAL_EECN_ERR: entry->status = IB_WC_INV_EECN_ERR; break; case SYNDROME_INVAL_EEC_STATE_ERR: entry->status = IB_WC_INV_EEC_STATE_ERR; break; default: entry->status = IB_WC_GENERAL_ERR; break; } entry->vendor_err = cqe->vendor_err; /* * Mem-free HCAs always generate one CQE per WQE, even in the * error case, so we don't have to check the doorbell count, etc. */ if (mthca_is_memfree(dev)) return; mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); /* * If we're at the end of the WQE chain, or we've used up our * doorbell count, free the CQE. Otherwise just update it for * the next poll operation. 
*/ if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) return; be16_add_cpu(&cqe->db_cnt, -dbd); cqe->wqe = new_wqe; cqe->syndrome = SYNDROME_WR_FLUSH_ERR; *free_cqe = 0; } static inline int mthca_poll_one(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp **cur_qp, int *freed, struct ib_wc *entry) { struct mthca_wq *wq; struct mthca_cqe *cqe; int wqe_index; int is_error; int is_send; int free_cqe = 1; int err = 0; u16 checksum; cqe = next_cqe_sw(cq); if (!cqe) return -EAGAIN; /* * Make sure we read CQ entry contents after we've checked the * ownership bit. */ rmb(); if (0) { mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n", cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe)); dump_cqe(dev, cqe); } is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == MTHCA_ERROR_CQE_OPCODE_MASK; is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80; if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) { /* * We do not have to take the QP table lock here, * because CQs will be locked while QPs are removed * from the table. */ *cur_qp = mthca_array_get(&dev->qp_table.qp, be32_to_cpu(cqe->my_qpn) & (dev->limits.num_qps - 1)); if (!*cur_qp) { mthca_warn(dev, "CQ entry for unknown QP %06x\n", be32_to_cpu(cqe->my_qpn) & 0xffffff); err = -EINVAL; goto out; } } entry->qp = &(*cur_qp)->ibqp; if (is_send) { wq = &(*cur_qp)->sq; wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset) >> wq->wqe_shift); entry->wr_id = (*cur_qp)->wrid[wqe_index + (*cur_qp)->rq.max]; } else if ((*cur_qp)->ibqp.srq) { struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq); u32 wqe = be32_to_cpu(cqe->wqe); wq = NULL; wqe_index = wqe >> srq->wqe_shift; entry->wr_id = srq->wrid[wqe_index]; mthca_free_srq_wqe(srq, wqe); } else { s32 wqe; wq = &(*cur_qp)->rq; wqe = be32_to_cpu(cqe->wqe); wqe_index = wqe >> wq->wqe_shift; /* * WQE addr == base - 1 might be reported in receive completion * with error instead of (rq size - 1) by Sinai FW 1.0.800 and * Arbel FW 5.1.400. This bug should be fixed in later FW revs. 
*/ if (unlikely(wqe_index < 0)) wqe_index = wq->max - 1; entry->wr_id = (*cur_qp)->wrid[wqe_index]; } if (wq) { if (wq->last_comp < wqe_index) wq->tail += wqe_index - wq->last_comp; else wq->tail += wqe_index + wq->max - wq->last_comp; wq->last_comp = wqe_index; } if (is_error) { handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, (struct mthca_err_cqe *) cqe, entry, &free_cqe); goto out; } if (is_send) { entry->wc_flags = 0; switch (cqe->opcode) { case MTHCA_OPCODE_RDMA_WRITE: entry->opcode = IB_WC_RDMA_WRITE; break; case MTHCA_OPCODE_RDMA_WRITE_IMM: entry->opcode = IB_WC_RDMA_WRITE; entry->wc_flags |= IB_WC_WITH_IMM; break; case MTHCA_OPCODE_SEND: entry->opcode = IB_WC_SEND; break; case MTHCA_OPCODE_SEND_IMM: entry->opcode = IB_WC_SEND; entry->wc_flags |= IB_WC_WITH_IMM; break; case MTHCA_OPCODE_RDMA_READ: entry->opcode = IB_WC_RDMA_READ; entry->byte_len = be32_to_cpu(cqe->byte_cnt); break; case MTHCA_OPCODE_ATOMIC_CS: entry->opcode = IB_WC_COMP_SWAP; entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; break; case MTHCA_OPCODE_ATOMIC_FA: entry->opcode = IB_WC_FETCH_ADD; entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; break; default: entry->opcode = 0xFF; break; } } else { entry->byte_len = be32_to_cpu(cqe->byte_cnt); switch (cqe->opcode & 0x1f) { case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE: case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE: entry->wc_flags = IB_WC_WITH_IMM; entry->ex.imm_data = cqe->imm_etype_pkey_eec; entry->opcode = IB_WC_RECV; break; case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE: case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE: entry->wc_flags = IB_WC_WITH_IMM; entry->ex.imm_data = cqe->imm_etype_pkey_eec; entry->opcode = IB_WC_RECV_RDMA_WITH_IMM; break; default: entry->wc_flags = 0; entry->opcode = IB_WC_RECV; break; } entry->slid = be16_to_cpu(cqe->rlid); entry->sl = cqe->sl_ipok >> 4; entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff; entry->dlid_path_bits = cqe->g_mlpath & 0x7f; entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16; entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0; checksum = (be32_to_cpu(cqe->rqpn) >> 24) | ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00); entry->wc_flags |= (cqe->sl_ipok & 1 && checksum == 0xffff) ? IB_WC_IP_CSUM_OK : 0; } entry->status = IB_WC_SUCCESS; out: if (likely(free_cqe)) { set_cqe_hw(cqe); ++(*freed); ++cq->cons_index; } return err; } int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) { struct mthca_dev *dev = to_mdev(ibcq->device); struct mthca_cq *cq = to_mcq(ibcq); struct mthca_qp *qp = NULL; unsigned long flags; int err = 0; int freed = 0; int npolled; spin_lock_irqsave(&cq->lock, flags); npolled = 0; repoll: while (npolled < num_entries) { err = mthca_poll_one(dev, cq, &qp, &freed, entry + npolled); if (err) break; ++npolled; } if (freed) { wmb(); update_cons_index(dev, cq, freed); } /* * If a CQ resize is in progress and we discovered that the * old buffer is empty, then peek in the new buffer, and if * it's not empty, switch to the new buffer and continue * polling there. */ if (unlikely(err == -EAGAIN && cq->resize_buf && cq->resize_buf->state == CQ_RESIZE_READY)) { /* * In Tavor mode, the hardware keeps the producer * index modulo the CQ size. Since we might be making * the CQ bigger, we need to mask our consumer index * using the size of the old CQ buffer before looking * in the new CQ buffer. 
*/ if (!mthca_is_memfree(dev)) cq->cons_index &= cq->ibcq.cqe; if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf, cq->cons_index & cq->resize_buf->cqe))) { struct mthca_cq_buf tbuf; int tcqe; tbuf = cq->buf; tcqe = cq->ibcq.cqe; cq->buf = cq->resize_buf->buf; cq->ibcq.cqe = cq->resize_buf->cqe; cq->resize_buf->buf = tbuf; cq->resize_buf->cqe = tcqe; cq->resize_buf->state = CQ_RESIZE_SWAPPED; goto repoll; } } spin_unlock_irqrestore(&cq->lock, flags); return err == 0 || err == -EAGAIN ? npolled : err; } int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags) { u32 dbhi = ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : MTHCA_TAVOR_CQ_DB_REQ_NOT) | to_mcq(cq)->cqn; mthca_write64(dbhi, 0xffffffff, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock)); return 0; } int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct mthca_cq *cq = to_mcq(ibcq); __be32 db_rec[2]; u32 dbhi; u32 sn = cq->arm_sn & 3; db_rec[0] = cpu_to_be32(cq->cons_index); db_rec[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) | ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 1 : 2)); mthca_write_db_rec(db_rec, cq->arm_db); /* * Make sure that the doorbell record in host memory is * written before ringing the doorbell via PCI MMIO. */ wmb(); dbhi = (sn << 28) | ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL : MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn; mthca_write64(dbhi, cq->cons_index, to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock)); return 0; } int mthca_init_cq(struct mthca_dev *dev, int nent, struct mthca_ucontext *ctx, u32 pdn, struct mthca_cq *cq) { struct mthca_mailbox *mailbox; struct mthca_cq_context *cq_context; int err = -ENOMEM; cq->ibcq.cqe = nent - 1; cq->is_kernel = !ctx; cq->cqn = mthca_alloc(&dev->cq_table.alloc); if (cq->cqn == -1) return -ENOMEM; if (mthca_is_memfree(dev)) { err = mthca_table_get(dev, dev->cq_table.table, cq->cqn); if (err) goto err_out; if (cq->is_kernel) { cq->arm_sn = 1; err = -ENOMEM; cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->cqn, &cq->set_ci_db); if (cq->set_ci_db_index < 0) goto err_out_icm; cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->cqn, &cq->arm_db); if (cq->arm_db_index < 0) goto err_out_ci; } } mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto err_out_arm; } cq_context = mailbox->buf; if (cq->is_kernel) { err = mthca_alloc_cq_buf(dev, &cq->buf, nent); if (err) goto err_out_mailbox; } spin_lock_init(&cq->lock); cq->refcount = 1; init_waitqueue_head(&cq->wait); mutex_init(&cq->mutex); memset(cq_context, 0, sizeof *cq_context); cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK | MTHCA_CQ_STATE_DISARMED | MTHCA_CQ_FLAG_TR); cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); if (ctx) cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index); else cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index); cq_context->error_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); cq_context->comp_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn); cq_context->pd = cpu_to_be32(pdn); cq_context->lkey = cpu_to_be32(cq->buf.mr.ibmr.lkey); cq_context->cqn = cpu_to_be32(cq->cqn); if (mthca_is_memfree(dev)) { cq_context->ci_db = cpu_to_be32(cq->set_ci_db_index); cq_context->state_db = cpu_to_be32(cq->arm_db_index); } err = 
mthca_SW2HW_CQ(dev, mailbox, cq->cqn); if (err) { mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err); goto err_out_free_mr; } spin_lock_irq(&dev->cq_table.lock); err = mthca_array_set(&dev->cq_table.cq, cq->cqn & (dev->limits.num_cqs - 1), cq); if (err) { spin_unlock_irq(&dev->cq_table.lock); goto err_out_free_mr; } spin_unlock_irq(&dev->cq_table.lock); cq->cons_index = 0; mthca_free_mailbox(dev, mailbox); return 0; err_out_free_mr: if (cq->is_kernel) mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); err_out_mailbox: mthca_free_mailbox(dev, mailbox); err_out_arm: if (cq->is_kernel && mthca_is_memfree(dev)) mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); err_out_ci: if (cq->is_kernel && mthca_is_memfree(dev)) mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index); err_out_icm: mthca_table_put(dev, dev->cq_table.table, cq->cqn); err_out: mthca_free(&dev->cq_table.alloc, cq->cqn); return err; } static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq) { int c; spin_lock_irq(&dev->cq_table.lock); c = cq->refcount; spin_unlock_irq(&dev->cq_table.lock); return c; } void mthca_free_cq(struct mthca_dev *dev, struct mthca_cq *cq) { struct mthca_mailbox *mailbox; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { mthca_warn(dev, "No memory for mailbox to free CQ.\n"); return; } err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn); if (err) mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err); if (0) { __be32 *ctx = mailbox->buf; int j; printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n", cq->cqn, cq->cons_index, cq->is_kernel ? !!next_cqe_sw(cq) : 0); for (j = 0; j < 16; ++j) printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j])); } spin_lock_irq(&dev->cq_table.lock); mthca_array_clear(&dev->cq_table.cq, cq->cqn & (dev->limits.num_cqs - 1)); --cq->refcount; spin_unlock_irq(&dev->cq_table.lock); if (dev->mthca_flags & MTHCA_FLAG_MSI_X) synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector); else synchronize_irq(dev->pdev->irq); wait_event(cq->wait, !get_cq_refcount(dev, cq)); if (cq->is_kernel) { mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); if (mthca_is_memfree(dev)) { mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index); } } mthca_table_put(dev, dev->cq_table.table, cq->cqn); mthca_free(&dev->cq_table.alloc, cq->cqn); mthca_free_mailbox(dev, mailbox); } int mthca_init_cq_table(struct mthca_dev *dev) { int err; spin_lock_init(&dev->cq_table.lock); err = mthca_alloc_init(&dev->cq_table.alloc, dev->limits.num_cqs, (1 << 24) - 1, dev->limits.reserved_cqs); if (err) return err; err = mthca_array_init(&dev->cq_table.cq, dev->limits.num_cqs); if (err) mthca_alloc_cleanup(&dev->cq_table.alloc); return err; } void mthca_cleanup_cq_table(struct mthca_dev *dev) { mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs); mthca_alloc_cleanup(&dev->cq_table.alloc); }
linux-master
drivers/infiniband/hw/mthca/mthca_cq.c
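mthca_cq.c above drives its completion ring with an ownership bit in each CQE (MTHCA_CQ_ENTRY_OWNER_HW) and a consumer index that counts up monotonically and is masked with cq->ibcq.cqe, which mthca_init_cq() sets to nent - 1. The userspace toy below, not driver code, walks the same poll pattern under those assumptions: take entries whose ownership bit says "software", hand them back by setting the bit, and advance a free-running index that is masked into the power-of-two ring.

#include <stdint.h>
#include <stdio.h>

#define OWNER_HW 0x80            /* mirrors MTHCA_CQ_ENTRY_OWNER_HW (1 << 7) */
#define NENT     8               /* ring size must be a power of two */

struct fake_cqe {
	uint32_t payload;
	uint8_t  owner;
};

/* Return the entry if software owns it, NULL otherwise -- same test as cqe_sw(). */
static struct fake_cqe *fake_cqe_sw(struct fake_cqe *cqe)
{
	return (cqe->owner & OWNER_HW) ? NULL : cqe;
}

int main(void)
{
	struct fake_cqe ring[NENT];
	uint32_t cons_index = 0;
	int i;

	/* Hand the whole ring to "hardware", then pretend it completed 3 entries. */
	for (i = 0; i < NENT; ++i)
		ring[i].owner = OWNER_HW;
	for (i = 0; i < 3; ++i) {
		ring[i].payload = 100 + i;
		ring[i].owner = 0;
	}

	/* Poll loop: the consumer index counts up forever and is masked with
	 * (NENT - 1), the role cq->ibcq.cqe plays in mthca_poll_one(). */
	while (fake_cqe_sw(&ring[cons_index & (NENT - 1)])) {
		struct fake_cqe *cqe = &ring[cons_index & (NENT - 1)];

		printf("polled slot %u payload %u\n",
		       cons_index & (NENT - 1), cqe->payload);
		cqe->owner = OWNER_HW;   /* give it back, like set_cqe_hw() */
		++cons_index;
	}
	return 0;
}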
/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>

#include "mthca_dev.h"

int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd)
{
	int err = 0;

	pd->privileged = privileged;

	atomic_set(&pd->sqp_count, 0);
	pd->pd_num = mthca_alloc(&dev->pd_table.alloc);
	if (pd->pd_num == -1)
		return -ENOMEM;

	if (privileged) {
		err = mthca_mr_alloc_notrans(dev, pd->pd_num,
					     MTHCA_MPT_FLAG_LOCAL_READ |
					     MTHCA_MPT_FLAG_LOCAL_WRITE,
					     &pd->ntmr);
		if (err)
			mthca_free(&dev->pd_table.alloc, pd->pd_num);
	}

	return err;
}

void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd)
{
	if (pd->privileged)
		mthca_free_mr(dev, &pd->ntmr);
	mthca_free(&dev->pd_table.alloc, pd->pd_num);
}

int mthca_init_pd_table(struct mthca_dev *dev)
{
	return mthca_alloc_init(&dev->pd_table.alloc,
				dev->limits.num_pds,
				(1 << 24) - 1,
				dev->limits.reserved_pds);
}

void mthca_cleanup_pd_table(struct mthca_dev *dev)
{
	/* XXX check if any PDs are still allocated? */
	mthca_alloc_cleanup(&dev->pd_table.alloc);
}
linux-master
drivers/infiniband/hw/mthca/mthca_pd.c
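mthca_pd.c above is thin glue: a protection-domain number comes from a range allocator (mthca_alloc(), whose implementation is not part of this excerpt) that returns -1 on exhaustion, which mthca_pd_alloc() turns into -ENOMEM. The toy allocator below only mirrors that contract; the reserved low range stands in for dev->limits.reserved_pds and is an assumption about how the real allocator treats it, and all names and sizes here are made up for illustration.

#include <stdio.h>

#define NUM_PDS      16
#define RESERVED_PDS 4            /* assumed: low numbers kept out of circulation */

static unsigned char in_use[NUM_PDS];

/* Toy stand-in for mthca_alloc(): returns a free number or -1. */
static int pd_alloc(void)
{
	int i;

	for (i = RESERVED_PDS; i < NUM_PDS; ++i)
		if (!in_use[i]) {
			in_use[i] = 1;
			return i;
		}
	return -1;
}

static void pd_free(int pd_num)
{
	in_use[pd_num] = 0;
}

int main(void)
{
	int pd_num = pd_alloc();

	if (pd_num == -1) {          /* mthca_pd_alloc() maps this to -ENOMEM */
		fprintf(stderr, "out of protection domains\n");
		return 1;
	}
	printf("got PD number %d\n", pd_num);
	pd_free(pd_num);
	return 0;
}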
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include <linux/errno.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_memfree.h" struct mthca_mtt { struct mthca_buddy *buddy; int order; u32 first_seg; }; /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. */ struct mthca_mpt_entry { __be32 flags; __be32 page_size; __be32 key; __be32 pd; __be64 start; __be64 length; __be32 lkey; __be32 window_count; __be32 window_count_limit; __be64 mtt_seg; __be32 mtt_sz; /* Arbel only */ u32 reserved[2]; } __packed; #define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) #define MTHCA_MPT_FLAG_MIO (1 << 17) #define MTHCA_MPT_FLAG_BIND_ENABLE (1 << 15) #define MTHCA_MPT_FLAG_PHYSICAL (1 << 9) #define MTHCA_MPT_FLAG_REGION (1 << 8) #define MTHCA_MTT_FLAG_PRESENT 1 #define MTHCA_MPT_STATUS_SW 0xF0 #define MTHCA_MPT_STATUS_HW 0x00 #define SINAI_FMR_KEY_INC 0x1000000 /* * Buddy allocator for MTT segments (currently not very efficient * since it doesn't keep a free list and just searches linearly * through the bitmaps) */ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) { int o; int m; u32 seg; spin_lock(&buddy->lock); for (o = order; o <= buddy->max_order; ++o) if (buddy->num_free[o]) { m = 1 << (buddy->max_order - o); seg = find_first_bit(buddy->bits[o], m); if (seg < m) goto found; } spin_unlock(&buddy->lock); return -1; found: __clear_bit(seg, buddy->bits[o]); --buddy->num_free[o]; while (o > order) { --o; seg <<= 1; __set_bit(seg ^ 1, buddy->bits[o]); ++buddy->num_free[o]; } spin_unlock(&buddy->lock); seg <<= order; return seg; } static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) { seg >>= order; spin_lock(&buddy->lock); while (test_bit(seg ^ 1, buddy->bits[order])) { __clear_bit(seg ^ 1, buddy->bits[order]); --buddy->num_free[order]; seg >>= 1; ++order; } __set_bit(seg, buddy->bits[order]); ++buddy->num_free[order]; spin_unlock(&buddy->lock); } static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) { int i; buddy->max_order = max_order; spin_lock_init(&buddy->lock); buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *), GFP_KERNEL); buddy->num_free = 
kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); if (!buddy->bits || !buddy->num_free) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i), GFP_KERNEL); if (!buddy->bits[i]) goto err_out_free; } __set_bit(0, buddy->bits[buddy->max_order]); buddy->num_free[buddy->max_order] = 1; return 0; err_out_free: for (i = 0; i <= buddy->max_order; ++i) bitmap_free(buddy->bits[i]); err_out: kfree(buddy->bits); kfree(buddy->num_free); return -ENOMEM; } static void mthca_buddy_cleanup(struct mthca_buddy *buddy) { int i; for (i = 0; i <= buddy->max_order; ++i) bitmap_free(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); } static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order, struct mthca_buddy *buddy) { u32 seg = mthca_buddy_alloc(buddy, order); if (seg == -1) return -1; if (mthca_is_memfree(dev)) if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg, seg + (1 << order) - 1)) { mthca_buddy_free(buddy, seg, order); seg = -1; } return seg; } static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size, struct mthca_buddy *buddy) { struct mthca_mtt *mtt; int i; if (size <= 0) return ERR_PTR(-EINVAL); mtt = kmalloc(sizeof *mtt, GFP_KERNEL); if (!mtt) return ERR_PTR(-ENOMEM); mtt->buddy = buddy; mtt->order = 0; for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1) ++mtt->order; mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy); if (mtt->first_seg == -1) { kfree(mtt); return ERR_PTR(-ENOMEM); } return mtt; } struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size) { return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy); } void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt) { if (!mtt) return; mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order); mthca_table_put_range(dev, dev->mr_table.mtt_table, mtt->first_seg, mtt->first_seg + (1 << mtt->order) - 1); kfree(mtt); } static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) { struct mthca_mailbox *mailbox; __be64 *mtt_entry; int err = 0; int i; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mtt_entry = mailbox->buf; while (list_len > 0) { mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size + start_index * 8); mtt_entry[1] = 0; for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i) mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT); /* * If we have an odd number of entries to write, add * one more dummy entry for firmware efficiency. */ if (i & 1) mtt_entry[i + 2] = 0; err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1); if (err) { mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); goto out; } list_len -= i; start_index += i; buffer_list += i; } out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_write_mtt_size(struct mthca_dev *dev) { if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy || !(dev->mthca_flags & MTHCA_FLAG_FMR)) /* * Be friendly to WRITE_MTT command * and leave two empty slots for the * index and reserved fields of the * mailbox. */ return PAGE_SIZE / sizeof (u64) - 2; /* For Arbel, all MTTs must fit in the same page. */ return mthca_is_memfree(dev) ? 
(PAGE_SIZE / sizeof (u64)) : 0x7ffffff; } static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) { u64 __iomem *mtts; int i; mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size + start_index * sizeof (u64); for (i = 0; i < list_len; ++i) mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT), mtts + i); } static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) { __be64 *mtts; dma_addr_t dma_handle; int i; int s = start_index * sizeof (u64); /* For Arbel, all MTTs must fit in the same page. */ BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE); /* Require full segments */ BUG_ON(s % dev->limits.mtt_seg_size); mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg + s / dev->limits.mtt_seg_size, &dma_handle); BUG_ON(!mtts); dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE); for (i = 0; i < list_len; ++i) mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT); dma_sync_single_for_device(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE); } int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) { int size = mthca_write_mtt_size(dev); int chunk; if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy || !(dev->mthca_flags & MTHCA_FLAG_FMR)) return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len); while (list_len > 0) { chunk = min(size, list_len); if (mthca_is_memfree(dev)) mthca_arbel_write_mtt_seg(dev, mtt, start_index, buffer_list, chunk); else mthca_tavor_write_mtt_seg(dev, mtt, start_index, buffer_list, chunk); list_len -= chunk; start_index += chunk; buffer_list += chunk; } return 0; } static inline u32 tavor_hw_index_to_key(u32 ind) { return ind; } static inline u32 tavor_key_to_hw_index(u32 key) { return key; } static inline u32 arbel_hw_index_to_key(u32 ind) { return (ind >> 24) | (ind << 8); } static inline u32 arbel_key_to_hw_index(u32 key) { return (key << 24) | (key >> 8); } static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind) { if (mthca_is_memfree(dev)) return arbel_hw_index_to_key(ind); else return tavor_hw_index_to_key(ind); } static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key) { if (mthca_is_memfree(dev)) return arbel_key_to_hw_index(key); else return tavor_key_to_hw_index(key); } static inline u32 adjust_key(struct mthca_dev *dev, u32 key) { if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) return ((key << 20) & 0x800000) | (key & 0x7fffff); else return key; } int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) { struct mthca_mailbox *mailbox; struct mthca_mpt_entry *mpt_entry; u32 key; int i; int err; WARN_ON(buffer_size_shift >= 32); key = mthca_alloc(&dev->mr_table.mpt_alloc); if (key == -1) return -ENOMEM; key = adjust_key(dev, key); mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); if (mthca_is_memfree(dev)) { err = mthca_table_get(dev, dev->mr_table.mpt_table, key); if (err) goto err_out_mpt_free; } mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto err_out_table; } mpt_entry = mailbox->buf; mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | MTHCA_MPT_FLAG_MIO | MTHCA_MPT_FLAG_REGION | access); if (!mr->mtt) mpt_entry->flags |= 
cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL); mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12); mpt_entry->key = cpu_to_be32(key); mpt_entry->pd = cpu_to_be32(pd); mpt_entry->start = cpu_to_be64(iova); mpt_entry->length = cpu_to_be64(total_size); memset_startat(mpt_entry, 0, lkey); if (mr->mtt) mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mr->mtt->first_seg * dev->limits.mtt_seg_size); if (0) { mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); if ((i + 1) % 4 == 0) printk("\n"); } } err = mthca_SW2HW_MPT(dev, mailbox, key & (dev->limits.num_mpts - 1)); if (err) { mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); goto err_out_mailbox; } mthca_free_mailbox(dev, mailbox); return err; err_out_mailbox: mthca_free_mailbox(dev, mailbox); err_out_table: mthca_table_put(dev, dev->mr_table.mpt_table, key); err_out_mpt_free: mthca_free(&dev->mr_table.mpt_alloc, key); return err; } int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, u32 access, struct mthca_mr *mr) { mr->mtt = NULL; return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); } int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, u64 *buffer_list, int buffer_size_shift, int list_len, u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) { int err; mr->mtt = mthca_alloc_mtt(dev, list_len); if (IS_ERR(mr->mtt)) return PTR_ERR(mr->mtt); err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len); if (err) { mthca_free_mtt(dev, mr->mtt); return err; } err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova, total_size, access, mr); if (err) mthca_free_mtt(dev, mr->mtt); return err; } /* Free mr */ static void mthca_free_region(struct mthca_dev *dev, u32 lkey) { mthca_table_put(dev, dev->mr_table.mpt_table, key_to_hw_index(dev, lkey)); mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); } void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr) { int err; err = mthca_HW2SW_MPT(dev, NULL, key_to_hw_index(dev, mr->ibmr.lkey) & (dev->limits.num_mpts - 1)); if (err) mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err); mthca_free_region(dev, mr->ibmr.lkey); mthca_free_mtt(dev, mr->mtt); } int mthca_init_mr_table(struct mthca_dev *dev) { phys_addr_t addr; int mpts, mtts, err, i; err = mthca_alloc_init(&dev->mr_table.mpt_alloc, dev->limits.num_mpts, ~0, dev->limits.reserved_mrws); if (err) return err; if (!mthca_is_memfree(dev) && (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) dev->limits.fmr_reserved_mtts = 0; else dev->mthca_flags |= MTHCA_FLAG_FMR; if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) mthca_dbg(dev, "Memory key throughput optimization activated.\n"); err = mthca_buddy_init(&dev->mr_table.mtt_buddy, fls(dev->limits.num_mtt_segs - 1)); if (err) goto err_mtt_buddy; dev->mr_table.tavor_fmr.mpt_base = NULL; dev->mr_table.tavor_fmr.mtt_base = NULL; if (dev->limits.fmr_reserved_mtts) { i = fls(dev->limits.fmr_reserved_mtts - 1); if (i >= 31) { mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n"); err = -EINVAL; goto err_fmr_mpt; } mpts = mtts = 1 << i; } else { mtts = dev->limits.num_mtt_segs; mpts = dev->limits.num_mpts; } if (!mthca_is_memfree(dev) && (dev->mthca_flags & MTHCA_FLAG_FMR)) { addr = pci_resource_start(dev->pdev, 4) + ((pci_resource_len(dev->pdev, 4) - 1) & dev->mr_table.mpt_base); dev->mr_table.tavor_fmr.mpt_base = ioremap(addr, mpts * sizeof(struct mthca_mpt_entry)); if (!dev->mr_table.tavor_fmr.mpt_base) { mthca_warn(dev, 
"MPT ioremap for FMR failed.\n"); err = -ENOMEM; goto err_fmr_mpt; } addr = pci_resource_start(dev->pdev, 4) + ((pci_resource_len(dev->pdev, 4) - 1) & dev->mr_table.mtt_base); dev->mr_table.tavor_fmr.mtt_base = ioremap(addr, mtts * dev->limits.mtt_seg_size); if (!dev->mr_table.tavor_fmr.mtt_base) { mthca_warn(dev, "MTT ioremap for FMR failed.\n"); err = -ENOMEM; goto err_fmr_mtt; } } if (dev->limits.fmr_reserved_mtts) { err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1)); if (err) goto err_fmr_mtt_buddy; /* Prevent regular MRs from using FMR keys */ err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1)); if (err) goto err_reserve_fmr; dev->mr_table.fmr_mtt_buddy = &dev->mr_table.tavor_fmr.mtt_buddy; } else dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy; /* FMR table is always the first, take reserved MTTs out of there */ if (dev->limits.reserved_mtts) { i = fls(dev->limits.reserved_mtts - 1); if (mthca_alloc_mtt_range(dev, i, dev->mr_table.fmr_mtt_buddy) == -1) { mthca_warn(dev, "MTT table of order %d is too small.\n", dev->mr_table.fmr_mtt_buddy->max_order); err = -ENOMEM; goto err_reserve_mtts; } } return 0; err_reserve_mtts: err_reserve_fmr: if (dev->limits.fmr_reserved_mtts) mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); err_fmr_mtt_buddy: if (dev->mr_table.tavor_fmr.mtt_base) iounmap(dev->mr_table.tavor_fmr.mtt_base); err_fmr_mtt: if (dev->mr_table.tavor_fmr.mpt_base) iounmap(dev->mr_table.tavor_fmr.mpt_base); err_fmr_mpt: mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); err_mtt_buddy: mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); return err; } void mthca_cleanup_mr_table(struct mthca_dev *dev) { /* XXX check if any MRs are still allocated? */ if (dev->limits.fmr_reserved_mtts) mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); if (dev->mr_table.tavor_fmr.mtt_base) iounmap(dev->mr_table.tavor_fmr.mtt_base); if (dev->mr_table.tavor_fmr.mpt_base) iounmap(dev->mr_table.tavor_fmr.mpt_base); mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); }
linux-master
drivers/infiniband/hw/mthca/mthca_mr.c
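In the mem-free (Arbel) path above, a memory key is the MPT index rotated by one byte, and arbel_key_to_hw_index() / arbel_hw_index_to_key() are inverses of each other. The short program below copies those two one-liners into userspace and checks the round trip on a sample 32-bit value; it is a demonstration of the rotation, not driver code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same byte rotation as arbel_key_to_hw_index() / arbel_hw_index_to_key(). */
static uint32_t key_to_hw_index(uint32_t key) { return (key << 24) | (key >> 8); }
static uint32_t hw_index_to_key(uint32_t ind) { return (ind >> 24) | (ind << 8); }

int main(void)
{
	uint32_t key = 0xAABBCCDDu;
	uint32_t ind = key_to_hw_index(key);

	/* 0xAABBCCDD -> 0xDDAABBCC: the low byte of the key becomes the high
	 * byte of the hardware index, and applying the inverse rotation
	 * recovers the original key. */
	printf("key 0x%08x -> index 0x%08x -> key 0x%08x\n",
	       key, ind, hw_index_to_key(ind));
	assert(hw_index_to_key(key_to_hw_index(key)) == key);
	return 0;
}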
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/errno.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/slab.h> #include "mthca_dev.h" #include "mthca_cmd.h" int mthca_reset(struct mthca_dev *mdev) { int i; int err = 0; u32 *hca_header = NULL; u32 *bridge_header = NULL; struct pci_dev *bridge = NULL; int bridge_pcix_cap = 0; int hca_pcie_cap = 0; int hca_pcix_cap = 0; u16 devctl; u16 linkctl; #define MTHCA_RESET_OFFSET 0xf0010 #define MTHCA_RESET_VALUE swab32(1) /* * Reset the chip. This is somewhat ugly because we have to * save off the PCI header before reset and then restore it * after the chip reboots. We skip config space offsets 22 * and 23 since those have a special meaning. * * To make matters worse, for Tavor (PCI-X HCA) we have to * find the associated bridge device and save off its PCI * header as well. */ if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) { /* Look for the bridge -- its device ID will be 2 more than HCA's device ID. */ while ((bridge = pci_get_device(mdev->pdev->vendor, mdev->pdev->device + 2, bridge)) != NULL) { if (bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE && bridge->subordinate == mdev->pdev->bus) { mthca_dbg(mdev, "Found bridge: %s\n", pci_name(bridge)); break; } } if (!bridge) { /* * Didn't find a bridge for a Tavor device -- * assume we're in no-bridge mode and hope for * the best. */ mthca_warn(mdev, "No bridge found for %s\n", pci_name(mdev->pdev)); } } /* For Arbel do we need to save off the full 4K PCI Express header?? 
*/ hca_header = kmalloc(256, GFP_KERNEL); if (!hca_header) { err = -ENOMEM; goto put_dev; } for (i = 0; i < 64; ++i) { if (i == 22 || i == 23) continue; if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) { err = -ENODEV; mthca_err(mdev, "Couldn't save HCA " "PCI header, aborting.\n"); goto free_hca; } } hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX); hca_pcie_cap = pci_pcie_cap(mdev->pdev); if (bridge) { bridge_header = kmalloc(256, GFP_KERNEL); if (!bridge_header) { err = -ENOMEM; goto free_hca; } for (i = 0; i < 64; ++i) { if (i == 22 || i == 23) continue; if (pci_read_config_dword(bridge, i * 4, bridge_header + i)) { err = -ENODEV; mthca_err(mdev, "Couldn't save HCA bridge " "PCI header, aborting.\n"); goto free_bh; } } bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX); if (!bridge_pcix_cap) { err = -ENODEV; mthca_err(mdev, "Couldn't locate HCA bridge " "PCI-X capability, aborting.\n"); goto free_bh; } } /* actually hit reset */ { void __iomem *reset = ioremap(pci_resource_start(mdev->pdev, 0) + MTHCA_RESET_OFFSET, 4); if (!reset) { err = -ENOMEM; mthca_err(mdev, "Couldn't map HCA reset register, " "aborting.\n"); goto free_bh; } writel(MTHCA_RESET_VALUE, reset); iounmap(reset); } /* Docs say to wait one second before accessing device */ msleep(1000); /* Now wait for PCI device to start responding again */ { u32 v; int c = 0; for (c = 0; c < 100; ++c) { if (pci_read_config_dword(bridge ? bridge : mdev->pdev, 0, &v)) { err = -ENODEV; mthca_err(mdev, "Couldn't access HCA after reset, " "aborting.\n"); goto free_bh; } if (v != 0xffffffff) goto good; msleep(100); } err = -ENODEV; mthca_err(mdev, "PCI device did not come back after reset, " "aborting.\n"); goto free_bh; } good: /* Now restore the PCI headers */ if (bridge) { if (pci_write_config_dword(bridge, bridge_pcix_cap + 0x8, bridge_header[(bridge_pcix_cap + 0x8) / 4])) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA bridge Upstream " "split transaction control, aborting.\n"); goto free_bh; } if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc, bridge_header[(bridge_pcix_cap + 0xc) / 4])) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA bridge Downstream " "split transaction control, aborting.\n"); goto free_bh; } /* * Bridge control register is at 0x3e, so we'll * naturally restore it last in this loop. 
*/ for (i = 0; i < 16; ++i) { if (i * 4 == PCI_COMMAND) continue; if (pci_write_config_dword(bridge, i * 4, bridge_header[i])) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA bridge reg %x, " "aborting.\n", i); goto free_bh; } } if (pci_write_config_dword(bridge, PCI_COMMAND, bridge_header[PCI_COMMAND / 4])) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, " "aborting.\n"); goto free_bh; } } if (hca_pcix_cap) { if (pci_write_config_dword(mdev->pdev, hca_pcix_cap, hca_header[hca_pcix_cap / 4])) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA PCI-X " "command register, aborting.\n"); goto free_bh; } } if (hca_pcie_cap) { devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4]; if (pcie_capability_write_word(mdev->pdev, PCI_EXP_DEVCTL, devctl)) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA PCI Express " "Device Control register, aborting.\n"); goto free_bh; } linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4]; if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL, linkctl)) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA PCI Express " "Link control register, aborting.\n"); goto free_bh; } } for (i = 0; i < 16; ++i) { if (i * 4 == PCI_COMMAND) continue; if (pci_write_config_dword(mdev->pdev, i * 4, hca_header[i])) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA reg %x, " "aborting.\n", i); goto free_bh; } } if (pci_write_config_dword(mdev->pdev, PCI_COMMAND, hca_header[PCI_COMMAND / 4])) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA COMMAND, " "aborting.\n"); } free_bh: kfree(bridge_header); free_hca: kfree(hca_header); put_dev: pci_dev_put(bridge); return err; }
linux-master
drivers/infiniband/hw/mthca/mthca_reset.c
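mthca_reset() above is mostly bookkeeping around one register write: save the PCI config header dwords (skipping offsets 22 and 23), hit the reset register, poll until the device answers config reads again, then restore the standard header with PCI_COMMAND written last. The sketch below replays just that save/restore ordering against an in-memory array standing in for config space; it illustrates the ordering only and is not usable PCI code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_DWORDS  64
#define PCI_COMMAND 0x04          /* standard config-space offset of the command register */

static uint32_t config_space[HDR_DWORDS];   /* stand-in for the device's config header */

int main(void)
{
	uint32_t saved[HDR_DWORDS] = { 0 };
	int i;

	for (i = 0; i < HDR_DWORDS; ++i)
		config_space[i] = i;             /* fake pre-reset contents */

	/* Save: dwords 22 and 23 are skipped, as in mthca_reset(). */
	for (i = 0; i < HDR_DWORDS; ++i) {
		if (i == 22 || i == 23)
			continue;
		saved[i] = config_space[i];
	}

	memset(config_space, 0, sizeof(config_space));   /* "reset" wipes the header */

	/* Restore the standard header, leaving the command register for last ... */
	for (i = 0; i < 16; ++i) {
		if (i * 4 == PCI_COMMAND)
			continue;
		config_space[i] = saved[i];
	}
	/* ... then write PCI_COMMAND, mirroring mthca_reset(), which re-enables
	 * the device only after the rest of the header is back in place. */
	config_space[PCI_COMMAND / 4] = saved[PCI_COMMAND / 4];

	printf("command register restored to %u\n", config_space[PCI_COMMAND / 4]);
	return 0;
}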
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <linux/slab.h> #include <rdma/ib_verbs.h> #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> #include "mthca_dev.h" #include "mthca_cmd.h" enum { MTHCA_VENDOR_CLASS1 = 0x9, MTHCA_VENDOR_CLASS2 = 0xa }; static int mthca_update_rate(struct mthca_dev *dev, u8 port_num) { struct ib_port_attr *tprops = NULL; int ret; tprops = kmalloc(sizeof *tprops, GFP_KERNEL); if (!tprops) return -ENOMEM; ret = ib_query_port(&dev->ib_dev, port_num, tprops); if (ret) { dev_warn(&dev->ib_dev.dev, "ib_query_port failed (%d) forport %d\n", ret, port_num); goto out; } dev->rate[port_num - 1] = tprops->active_speed * ib_width_enum_to_int(tprops->active_width); out: kfree(tprops); return ret; } static void update_sm_ah(struct mthca_dev *dev, u8 port_num, u16 lid, u8 sl) { struct ib_ah *new_ah; struct rdma_ah_attr ah_attr; unsigned long flags; if (!dev->send_agent[port_num - 1][0]) return; memset(&ah_attr, 0, sizeof ah_attr); ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num); rdma_ah_set_dlid(&ah_attr, lid); rdma_ah_set_sl(&ah_attr, sl); rdma_ah_set_port_num(&ah_attr, port_num); new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, &ah_attr, 0); if (IS_ERR(new_ah)) return; spin_lock_irqsave(&dev->sm_lock, flags); if (dev->sm_ah[port_num - 1]) rdma_destroy_ah(dev->sm_ah[port_num - 1], 0); dev->sm_ah[port_num - 1] = new_ah; spin_unlock_irqrestore(&dev->sm_lock, flags); } /* * Snoop SM MADs for port info and P_Key table sets, so we can * synthesize LID change and P_Key change events. 
*/ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad, u16 prev_lid) { struct ib_event event; if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && mad->mad_hdr.method == IB_MGMT_METHOD_SET) { if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { struct ib_port_info *pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data; u16 lid = be16_to_cpu(pinfo->lid); mthca_update_rate(to_mdev(ibdev), port_num); update_sm_ah(to_mdev(ibdev), port_num, be16_to_cpu(pinfo->sm_lid), pinfo->neighbormtu_mastersmsl & 0xf); event.device = ibdev; event.element.port_num = port_num; if (pinfo->clientrereg_resv_subnetto & 0x80) { event.event = IB_EVENT_CLIENT_REREGISTER; ib_dispatch_event(&event); } if (prev_lid != lid) { event.event = IB_EVENT_LID_CHANGE; ib_dispatch_event(&event); } } if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { event.device = ibdev; event.event = IB_EVENT_PKEY_CHANGE; event.element.port_num = port_num; ib_dispatch_event(&event); } } } static void node_desc_override(struct ib_device *dev, struct ib_mad *mad) { if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP && mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { mutex_lock(&to_mdev(dev)->cap_mask_mutex); memcpy(((struct ib_smp *) mad)->data, dev->node_desc, IB_DEVICE_NODE_DESC_MAX); mutex_unlock(&to_mdev(dev)->cap_mask_mutex); } } static void forward_trap(struct mthca_dev *dev, u32 port_num, const struct ib_mad *mad) { int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; struct ib_mad_send_buf *send_buf; struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; int ret; unsigned long flags; if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) return; /* * We rely here on the fact that MLX QPs don't use the * address handle after the send is posted (this is * wrong following the IB spec strictly, but we know * it's OK for our devices). */ spin_lock_irqsave(&dev->sm_lock, flags); memcpy(send_buf->mad, mad, sizeof *mad); if ((send_buf->ah = dev->sm_ah[port_num - 1])) ret = ib_post_send_mad(send_buf, NULL); else ret = -EINVAL; spin_unlock_irqrestore(&dev->sm_lock, flags); if (ret) ib_free_send_mad(send_buf); } } int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad *in, struct ib_mad *out, size_t *out_mad_size, u16 *out_mad_pkey_index) { int err; u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE); u16 prev_lid = 0; struct ib_port_attr pattr; /* Forward locally generated traps to the SM */ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP && !slid) { forward_trap(to_mdev(ibdev), port_num, in); return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; } /* * Only handle SM gets, sets and trap represses for SM class * * Only handle PMA and Mellanox vendor-specific class gets and * sets for other classes. 
*/ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { if (in->mad_hdr.method != IB_MGMT_METHOD_GET && in->mad_hdr.method != IB_MGMT_METHOD_SET && in->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) return IB_MAD_RESULT_SUCCESS; /* * Don't process SMInfo queries or vendor-specific * MADs -- the SMA can't handle them. */ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || ((in->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == IB_SMP_ATTR_VENDOR_MASK)) return IB_MAD_RESULT_SUCCESS; } else if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 || in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) { if (in->mad_hdr.method != IB_MGMT_METHOD_GET && in->mad_hdr.method != IB_MGMT_METHOD_SET) return IB_MAD_RESULT_SUCCESS; } else return IB_MAD_RESULT_SUCCESS; if ((in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && in->mad_hdr.method == IB_MGMT_METHOD_SET && in->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && !ib_query_port(ibdev, port_num, &pattr)) prev_lid = ib_lid_cpu16(pattr.lid); err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY, mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc, in_grh, in, out); if (err == -EBADMSG) return IB_MAD_RESULT_SUCCESS; else if (err) { mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err); return IB_MAD_RESULT_FAILURE; } if (!out->mad_hdr.status) { smp_snoop(ibdev, port_num, in, prev_lid); node_desc_override(ibdev, out); } /* set return bit in status of directed route responses */ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) out->mad_hdr.status |= cpu_to_be16(1 << 15); if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) /* no response for trap repress */ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { ib_free_send_mad(mad_send_wc->send_buf); } int mthca_create_agents(struct mthca_dev *dev) { struct ib_mad_agent *agent; int p, q; int ret; spin_lock_init(&dev->sm_lock); for (p = 0; p < dev->limits.num_ports; ++p) for (q = 0; q <= 1; ++q) { agent = ib_register_mad_agent(&dev->ib_dev, p + 1, q ? IB_QPT_GSI : IB_QPT_SMI, NULL, 0, send_handler, NULL, NULL, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); goto err; } dev->send_agent[p][q] = agent; } for (p = 1; p <= dev->limits.num_ports; ++p) { ret = mthca_update_rate(dev, p); if (ret) { mthca_err(dev, "Failed to obtain port %d rate." " aborting.\n", p); goto err; } } return 0; err: for (p = 0; p < dev->limits.num_ports; ++p) for (q = 0; q <= 1; ++q) if (dev->send_agent[p][q]) ib_unregister_mad_agent(dev->send_agent[p][q]); return ret; } void mthca_free_agents(struct mthca_dev *dev) { struct ib_mad_agent *agent; int p, q; for (p = 0; p < dev->limits.num_ports; ++p) { for (q = 0; q <= 1; ++q) { agent = dev->send_agent[p][q]; dev->send_agent[p][q] = NULL; ib_unregister_mad_agent(agent); } if (dev->sm_ah[p]) rdma_destroy_ah(dev->sm_ah[p], RDMA_DESTROY_AH_SLEEPABLE); } }
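A minimal userspace sketch of the directed-route status handling in mthca_process_mad() above: the MAD status field is big-endian on the wire, so the response bit is set in host order and converted back. The htons()/ntohs() calls stand in for the kernel's cpu_to_be16()/be16_to_cpu(), and the example status value is made up.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Stand-in for the MAD header status field (big-endian on the wire). */
static uint16_t set_dr_response_bit(uint16_t be_status)
{
	uint16_t host = ntohs(be_status);

	host |= 1u << 15;	/* direction bit for directed-route responses */
	return htons(host);
}

int main(void)
{
	uint16_t status = htons(0x0004);	/* arbitrary example status */

	status = set_dr_response_bit(status);
	printf("status on the wire: 0x%04x\n", ntohs(status));
	return 0;
}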
linux-master
drivers/infiniband/hw/mthca/mthca_mad.c
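The bulk of mthca_process_mad() above is a filter that decides which management classes and methods are handed to the firmware MAD_IFC path. Below is a compilable userspace sketch of that decision; the enum values are stand-ins, not the real <rdma/ib_mad.h> constants.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in constants; the real values live in <rdma/ib_mad.h>. */
enum mgmt_class { CLS_SUBN_LID = 1, CLS_SUBN_DR = 2, CLS_PERF = 3, CLS_VENDOR1 = 4 };
enum mgmt_method { M_GET = 1, M_SET = 2, M_TRAP_REPRESS = 3, M_TRAP = 4 };

/* Returns true when the MAD should be passed on for processing. */
static bool should_process(enum mgmt_class cls, enum mgmt_method method)
{
	switch (cls) {
	case CLS_SUBN_LID:
	case CLS_SUBN_DR:
		/* SM classes: only gets, sets and trap represses. */
		return method == M_GET || method == M_SET ||
		       method == M_TRAP_REPRESS;
	case CLS_PERF:
	case CLS_VENDOR1:
		/* PMA and vendor classes: only gets and sets. */
		return method == M_GET || method == M_SET;
	default:
		return false;
	}
}

int main(void)
{
	printf("SM trap repress: %d\n", should_process(CLS_SUBN_DR, M_TRAP_REPRESS));
	printf("PMA trap:        %d\n", should_process(CLS_PERF, M_TRAP));
	return 0;
}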
/* * Copyright (c) 2005 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/jiffies.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/workqueue.h> #include "mthca_dev.h" enum { MTHCA_CATAS_POLL_INTERVAL = 5 * HZ, MTHCA_CATAS_TYPE_INTERNAL = 0, MTHCA_CATAS_TYPE_UPLINK = 3, MTHCA_CATAS_TYPE_DDR = 4, MTHCA_CATAS_TYPE_PARITY = 5, }; static DEFINE_SPINLOCK(catas_lock); static LIST_HEAD(catas_list); static struct workqueue_struct *catas_wq; static struct work_struct catas_work; static int catas_reset_disable; module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); static void catas_reset(struct work_struct *work) { struct mthca_dev *dev, *tmpdev; LIST_HEAD(tlist); int ret; mutex_lock(&mthca_device_mutex); spin_lock_irq(&catas_lock); list_splice_init(&catas_list, &tlist); spin_unlock_irq(&catas_lock); list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) { struct pci_dev *pdev = dev->pdev; ret = __mthca_restart_one(dev->pdev); /* 'dev' now is not valid */ if (ret) printk(KERN_ERR "mthca %s: Reset failed (%d)\n", pci_name(pdev), ret); else { struct mthca_dev *d = pci_get_drvdata(pdev); mthca_dbg(d, "Reset succeeded\n"); } } mutex_unlock(&mthca_device_mutex); } static void handle_catas(struct mthca_dev *dev) { struct ib_event event; unsigned long flags; const char *type; int i; event.device = &dev->ib_dev; event.event = IB_EVENT_DEVICE_FATAL; event.element.port_num = 0; dev->active = false; ib_dispatch_event(&event); switch (swab32(readl(dev->catas_err.map)) >> 24) { case MTHCA_CATAS_TYPE_INTERNAL: type = "internal error"; break; case MTHCA_CATAS_TYPE_UPLINK: type = "uplink bus error"; break; case MTHCA_CATAS_TYPE_DDR: type = "DDR data error"; break; case MTHCA_CATAS_TYPE_PARITY: type = "internal parity error"; break; default: type = "unknown error"; break; } mthca_err(dev, "Catastrophic error detected: %s\n", type); for (i = 0; i < dev->catas_err.size; ++i) mthca_err(dev, " buf[%02x]: %08x\n", i, swab32(readl(dev->catas_err.map + i))); if (catas_reset_disable) return; spin_lock_irqsave(&catas_lock, flags); list_add(&dev->catas_err.list, &catas_list); queue_work(catas_wq, &catas_work); 
spin_unlock_irqrestore(&catas_lock, flags); } static void poll_catas(struct timer_list *t) { struct mthca_dev *dev = from_timer(dev, t, catas_err.timer); int i; for (i = 0; i < dev->catas_err.size; ++i) if (readl(dev->catas_err.map + i)) { handle_catas(dev); return; } mod_timer(&dev->catas_err.timer, round_jiffies(jiffies + MTHCA_CATAS_POLL_INTERVAL)); } void mthca_start_catas_poll(struct mthca_dev *dev) { phys_addr_t addr; timer_setup(&dev->catas_err.timer, poll_catas, 0); dev->catas_err.map = NULL; addr = pci_resource_start(dev->pdev, 0) + ((pci_resource_len(dev->pdev, 0) - 1) & dev->catas_err.addr); dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4); if (!dev->catas_err.map) { mthca_warn(dev, "couldn't map catastrophic error region " "at 0x%llx/0x%x\n", (unsigned long long) addr, dev->catas_err.size * 4); return; } dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL; INIT_LIST_HEAD(&dev->catas_err.list); add_timer(&dev->catas_err.timer); } void mthca_stop_catas_poll(struct mthca_dev *dev) { del_timer_sync(&dev->catas_err.timer); if (dev->catas_err.map) iounmap(dev->catas_err.map); spin_lock_irq(&catas_lock); list_del(&dev->catas_err.list); spin_unlock_irq(&catas_lock); } int __init mthca_catas_init(void) { INIT_WORK(&catas_work, catas_reset); catas_wq = alloc_ordered_workqueue("mthca_catas", WQ_MEM_RECLAIM); if (!catas_wq) return -ENOMEM; return 0; } void mthca_catas_cleanup(void) { destroy_workqueue(catas_wq); }
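A minimal userspace sketch of the polling loop in poll_catas() above: scan the mapped error buffer and either handle the first nonzero word or re-arm. The buffer size, its contents, and the omitted byte swap are simplifications, not the driver's real behaviour.

#include <stdint.h>
#include <stdio.h>

#define CATAS_WORDS 4	/* stand-in for dev->catas_err.size */

/*
 * Returns the index of the first nonzero status word, or -1 if the
 * buffer is clean and polling should be re-armed.
 */
static int scan_catas_buf(const volatile uint32_t *buf, int nwords)
{
	for (int i = 0; i < nwords; i++)
		if (buf[i])
			return i;
	return -1;
}

int main(void)
{
	uint32_t buf[CATAS_WORDS] = { 0, 0, 0x03000000, 0 };	/* fake uplink error */
	int hit = scan_catas_buf(buf, CATAS_WORDS);

	if (hit >= 0)
		printf("catastrophic word[%d] = 0x%08x, type %u\n",
		       hit, buf[hit], buf[hit] >> 24);
	else
		printf("clean, re-arm the timer\n");
	return 0;
}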
linux-master
drivers/infiniband/hw/mthca/mthca_catas.c
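catas_reset() above takes the devices queued for reset off the shared list while holding catas_lock, then restarts them with no lock held. Here is a userspace sketch of that splice-then-process pattern using a toy singly linked list and a pthread mutex; all names are illustrative.

#include <pthread.h>
#include <stdio.h>

/* Toy singly linked list of devices pending reset (stand-in for catas_list). */
struct pending {
	const char *name;
	struct pending *next;
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pending *pending_head;

/* Equivalent of list_splice_init() under catas_lock: steal the whole list. */
static struct pending *steal_pending(void)
{
	struct pending *head;

	pthread_mutex_lock(&pending_lock);
	head = pending_head;
	pending_head = NULL;
	pthread_mutex_unlock(&pending_lock);
	return head;
}

int main(void)
{
	struct pending b = { "mthca1", NULL }, a = { "mthca0", &b };

	pending_head = &a;

	/* Work function: reset every queued device without holding the lock. */
	for (struct pending *p = steal_pending(); p; p = p->next)
		printf("resetting %s\n", p->name);
	return 0;
}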
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> #include <rdma/ib_user_verbs.h> #include <rdma/uverbs_ioctl.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/export.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include <rdma/mthca-abi.h> #include "mthca_memfree.h" static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { struct ib_smp *in_mad; struct ib_smp *out_mad; int err = -ENOMEM; struct mthca_dev *mdev = to_mdev(ibdev); if (uhw->inlen || uhw->outlen) return -EINVAL; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; memset(props, 0, sizeof *props); props->fw_ver = mdev->fw_ver; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mthca_MAD_IFC(mdev, 1, 1, 1, NULL, NULL, in_mad, out_mad); if (err) goto out; props->device_cap_flags = mdev->device_cap_flags; props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&props->sys_image_guid, out_mad->data + 4, 8); props->max_mr_size = ~0ull; props->page_size_cap = mdev->limits.page_size_cap; props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps; props->max_qp_wr = mdev->limits.max_wqes; props->max_send_sge = mdev->limits.max_sg; props->max_recv_sge = mdev->limits.max_sg; props->max_sge_rd = mdev->limits.max_sg; props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs; props->max_cqe = mdev->limits.max_cqes; props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws; props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds; props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift; props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma; props->max_res_rd_atom = 
props->max_qp_rd_atom * props->max_qp; props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs; props->max_srq_wr = mdev->limits.max_srq_wqes; props->max_srq_sge = mdev->limits.max_srq_sge; props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay; props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ? IB_ATOMIC_HCA : IB_ATOMIC_NONE; props->max_pkeys = mdev->limits.pkey_table_len; props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms; props->max_mcast_qp_attach = MTHCA_QP_PER_MGM; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; err = 0; out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { struct ib_smp *in_mad; struct ib_smp *out_mad; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; /* props being zeroed by the caller, avoid zeroing it here */ ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) goto out; props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); props->lmc = out_mad->data[34] & 0x7; props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); props->sm_sl = out_mad->data[36] & 0xf; props->state = out_mad->data[32] & 0xf; props->phys_state = out_mad->data[33] >> 4; props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; props->max_msg_sz = 0x80000000; props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; props->active_speed = out_mad->data[35] >> 4; props->max_mtu = out_mad->data[41] & 0xf; props->active_mtu = out_mad->data[36] >> 4; props->subnet_timeout = out_mad->data[51] & 0x1f; props->max_vl_num = out_mad->data[37] >> 4; props->init_type_reply = out_mad->data[41] >> 4; out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) { if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) return -EOPNOTSUPP; if (mask & IB_DEVICE_MODIFY_NODE_DESC) { if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) return -ERESTARTSYS; memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); } return 0; } static int mthca_modify_port(struct ib_device *ibdev, u32 port, int port_modify_mask, struct ib_port_modify *props) { struct mthca_set_ib_param set_ib; struct ib_port_attr attr; int err; if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) return -ERESTARTSYS; err = ib_query_port(ibdev, port, &attr); if (err) goto out; set_ib.set_si_guid = 0; set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR); set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & ~props->clr_port_cap_mask; err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port); if (err) goto out; out: mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); return err; } static int mthca_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) { struct ib_smp *in_mad; struct ib_smp *out_mad; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, 
GFP_KERNEL); if (!in_mad || !out_mad) goto out; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in_mad->attr_mod = cpu_to_be32(index / 32); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) goto out; *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid) { struct ib_smp *in_mad; struct ib_smp *out_mad; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) goto out; memcpy(gid->raw, out_mad->data + 8, 8); ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(index / 8); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) goto out; memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { struct ib_device *ibdev = uctx->device; struct mthca_alloc_ucontext_resp uresp = {}; struct mthca_ucontext *context = to_mucontext(uctx); int err; if (!(to_mdev(ibdev)->active)) return -EAGAIN; uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; if (mthca_is_memfree(to_mdev(ibdev))) uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size; else uresp.uarc_size = 0; err = mthca_uar_alloc(to_mdev(ibdev), &context->uar); if (err) return err; context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev)); if (IS_ERR(context->db_tab)) { err = PTR_ERR(context->db_tab); mthca_uar_free(to_mdev(ibdev), &context->uar); return err; } if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab); mthca_uar_free(to_mdev(ibdev), &context->uar); return -EFAULT; } context->reg_mr_warned = 0; return 0; } static void mthca_dealloc_ucontext(struct ib_ucontext *context) { mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar, to_mucontext(context)->db_tab); mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar); } static int mthca_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma) { if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, to_mucontext(context)->uar.pfn, PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; return 0; } static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct mthca_pd *pd = to_mpd(ibpd); int err; err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd); if (err) return err; if (udata) { if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { mthca_pd_free(to_mdev(ibdev), pd); return -EFAULT; } } return 0; } static int mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); return 0; } static int mthca_ah_create(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) { struct mthca_ah *ah = to_mah(ibah); return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd), init_attr->ah_attr, ah); } static int mthca_ah_destroy(struct ib_ah *ah, u32 flags) { 
mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); return 0; } static int mthca_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, struct ib_udata *udata) { struct mthca_create_srq ucmd; struct mthca_ucontext *context = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); struct mthca_srq *srq = to_msrq(ibsrq); int err; if (init_attr->srq_type != IB_SRQT_BASIC) return -EOPNOTSUPP; if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) return -EFAULT; err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar, context->db_tab, ucmd.db_index, ucmd.db_page); if (err) return err; srq->mr.ibmr.lkey = ucmd.lkey; srq->db_index = ucmd.db_index; } err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd), &init_attr->attr, srq, udata); if (err && udata) mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar, context->db_tab, ucmd.db_index); if (err) return err; if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { mthca_free_srq(to_mdev(ibsrq->device), srq); return -EFAULT; } return 0; } static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) { if (udata) { struct mthca_ucontext *context = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); mthca_unmap_user_db(to_mdev(srq->device), &context->uar, context->db_tab, to_msrq(srq)->db_index); } mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); return 0; } static int mthca_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct mthca_ucontext *context = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); struct mthca_create_qp ucmd; struct mthca_qp *qp = to_mqp(ibqp); struct mthca_dev *dev = to_mdev(ibqp->device); int err; if (init_attr->create_flags) return -EOPNOTSUPP; switch (init_attr->qp_type) { case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_UD: { if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) return -EFAULT; err = mthca_map_user_db(dev, &context->uar, context->db_tab, ucmd.sq_db_index, ucmd.sq_db_page); if (err) return err; err = mthca_map_user_db(dev, &context->uar, context->db_tab, ucmd.rq_db_index, ucmd.rq_db_page); if (err) { mthca_unmap_user_db(dev, &context->uar, context->db_tab, ucmd.sq_db_index); return err; } qp->mr.ibmr.lkey = ucmd.lkey; qp->sq.db_index = ucmd.sq_db_index; qp->rq.db_index = ucmd.rq_db_index; } err = mthca_alloc_qp(dev, to_mpd(ibqp->pd), to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq), init_attr->qp_type, init_attr->sq_sig_type, &init_attr->cap, qp, udata); if (err && udata) { mthca_unmap_user_db(dev, &context->uar, context->db_tab, ucmd.sq_db_index); mthca_unmap_user_db(dev, &context->uar, context->db_tab, ucmd.rq_db_index); } qp->ibqp.qp_num = qp->qpn; break; } case IB_QPT_SMI: case IB_QPT_GSI: { qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); if (!qp->sqp) return -ENOMEM; qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 
0 : 1; err = mthca_alloc_sqp(dev, to_mpd(ibqp->pd), to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq), init_attr->sq_sig_type, &init_attr->cap, qp->ibqp.qp_num, init_attr->port_num, qp, udata); break; } default: /* Don't support raw QPs */ return -EOPNOTSUPP; } if (err) { kfree(qp->sqp); return err; } init_attr->cap.max_send_wr = qp->sq.max; init_attr->cap.max_recv_wr = qp->rq.max; init_attr->cap.max_send_sge = qp->sq.max_gs; init_attr->cap.max_recv_sge = qp->rq.max_gs; init_attr->cap.max_inline_data = qp->max_inline_data; return 0; } static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) { if (udata) { struct mthca_ucontext *context = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); mthca_unmap_user_db(to_mdev(qp->device), &context->uar, context->db_tab, to_mqp(qp)->sq.db_index); mthca_unmap_user_db(to_mdev(qp->device), &context->uar, context->db_tab, to_mqp(qp)->rq.db_index); } mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); kfree(to_mqp(qp)->sqp); return 0; } static int mthca_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct ib_device *ibdev = ibcq->device; int entries = attr->cqe; struct mthca_create_cq ucmd; struct mthca_cq *cq; int nent; int err; struct mthca_ucontext *context = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); if (attr->flags) return -EOPNOTSUPP; if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes) return -EINVAL; if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) return -EFAULT; err = mthca_map_user_db(to_mdev(ibdev), &context->uar, context->db_tab, ucmd.set_db_index, ucmd.set_db_page); if (err) return err; err = mthca_map_user_db(to_mdev(ibdev), &context->uar, context->db_tab, ucmd.arm_db_index, ucmd.arm_db_page); if (err) goto err_unmap_set; } cq = to_mcq(ibcq); if (udata) { cq->buf.mr.ibmr.lkey = ucmd.lkey; cq->set_ci_db_index = ucmd.set_db_index; cq->arm_db_index = ucmd.arm_db_index; } for (nent = 1; nent <= entries; nent <<= 1) ; /* nothing */ err = mthca_init_cq(to_mdev(ibdev), nent, context, udata ? 
ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, cq); if (err) goto err_unmap_arm; if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) { mthca_free_cq(to_mdev(ibdev), cq); err = -EFAULT; goto err_unmap_arm; } cq->resize_buf = NULL; return 0; err_unmap_arm: if (udata) mthca_unmap_user_db(to_mdev(ibdev), &context->uar, context->db_tab, ucmd.arm_db_index); err_unmap_set: if (udata) mthca_unmap_user_db(to_mdev(ibdev), &context->uar, context->db_tab, ucmd.set_db_index); return err; } static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq, int entries) { int ret; spin_lock_irq(&cq->lock); if (cq->resize_buf) { ret = -EBUSY; goto unlock; } cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC); if (!cq->resize_buf) { ret = -ENOMEM; goto unlock; } cq->resize_buf->state = CQ_RESIZE_ALLOC; ret = 0; unlock: spin_unlock_irq(&cq->lock); if (ret) return ret; ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries); if (ret) { spin_lock_irq(&cq->lock); kfree(cq->resize_buf); cq->resize_buf = NULL; spin_unlock_irq(&cq->lock); return ret; } cq->resize_buf->cqe = entries - 1; spin_lock_irq(&cq->lock); cq->resize_buf->state = CQ_RESIZE_READY; spin_unlock_irq(&cq->lock); return 0; } static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibcq->device); struct mthca_cq *cq = to_mcq(ibcq); struct mthca_resize_cq ucmd; u32 lkey; int ret; if (entries < 1 || entries > dev->limits.max_cqes) return -EINVAL; mutex_lock(&cq->mutex); entries = roundup_pow_of_two(entries + 1); if (entries == ibcq->cqe + 1) { ret = 0; goto out; } if (cq->is_kernel) { ret = mthca_alloc_resize_buf(dev, cq, entries); if (ret) goto out; lkey = cq->resize_buf->buf.mr.ibmr.lkey; } else { if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { ret = -EFAULT; goto out; } lkey = ucmd.lkey; } ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries)); if (ret) { if (cq->resize_buf) { mthca_free_cq_buf(dev, &cq->resize_buf->buf, cq->resize_buf->cqe); kfree(cq->resize_buf); spin_lock_irq(&cq->lock); cq->resize_buf = NULL; spin_unlock_irq(&cq->lock); } goto out; } if (cq->is_kernel) { struct mthca_cq_buf tbuf; int tcqe; spin_lock_irq(&cq->lock); if (cq->resize_buf->state == CQ_RESIZE_READY) { mthca_cq_resize_copy_cqes(cq); tbuf = cq->buf; tcqe = cq->ibcq.cqe; cq->buf = cq->resize_buf->buf; cq->ibcq.cqe = cq->resize_buf->cqe; } else { tbuf = cq->resize_buf->buf; tcqe = cq->resize_buf->cqe; } kfree(cq->resize_buf); cq->resize_buf = NULL; spin_unlock_irq(&cq->lock); mthca_free_cq_buf(dev, &tbuf, tcqe); } else ibcq->cqe = entries - 1; out: mutex_unlock(&cq->mutex); return ret; } static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { if (udata) { struct mthca_ucontext *context = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); mthca_unmap_user_db(to_mdev(cq->device), &context->uar, context->db_tab, to_mcq(cq)->arm_db_index); mthca_unmap_user_db(to_mdev(cq->device), &context->uar, context->db_tab, to_mcq(cq)->set_ci_db_index); } mthca_free_cq(to_mdev(cq->device), to_mcq(cq)); return 0; } static inline u32 convert_access(int acc) { return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC : 0) | (acc & IB_ACCESS_REMOTE_WRITE ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) | (acc & IB_ACCESS_REMOTE_READ ? MTHCA_MPT_FLAG_REMOTE_READ : 0) | (acc & IB_ACCESS_LOCAL_WRITE ? 
MTHCA_MPT_FLAG_LOCAL_WRITE : 0) | MTHCA_MPT_FLAG_LOCAL_READ; } static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc) { struct mthca_mr *mr; int err; mr = kmalloc(sizeof *mr, GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); err = mthca_mr_alloc_notrans(to_mdev(pd->device), to_mpd(pd)->pd_num, convert_access(acc), mr); if (err) { kfree(mr); return ERR_PTR(err); } mr->umem = NULL; return &mr->ibmr; } static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int acc, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(pd->device); struct ib_block_iter biter; struct mthca_ucontext *context = rdma_udata_to_drv_context( udata, struct mthca_ucontext, ibucontext); struct mthca_mr *mr; struct mthca_reg_mr ucmd; u64 *pages; int n, i; int err = 0; int write_mtt_size; if (udata->inlen < sizeof ucmd) { if (!context->reg_mr_warned) { mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n", current->comm); mthca_warn(dev, " Update libmthca to fix this.\n"); } ++context->reg_mr_warned; ucmd.mr_attrs = 0; } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) return ERR_PTR(-EFAULT); mr = kmalloc(sizeof *mr, GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->umem = ib_umem_get(pd->device, start, length, acc); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); goto err; } n = ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE); mr->mtt = mthca_alloc_mtt(dev, n); if (IS_ERR(mr->mtt)) { err = PTR_ERR(mr->mtt); goto err_umem; } pages = (u64 *) __get_free_page(GFP_KERNEL); if (!pages) { err = -ENOMEM; goto err_mtt; } i = n = 0; write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages)); rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) { pages[i++] = rdma_block_iter_dma_address(&biter); /* * Be friendly to write_mtt and pass it chunks * of appropriate size. 
*/ if (i == write_mtt_size) { err = mthca_write_mtt(dev, mr->mtt, n, pages, i); if (err) goto mtt_done; n += i; i = 0; } } if (i) err = mthca_write_mtt(dev, mr->mtt, n, pages, i); mtt_done: free_page((unsigned long) pages); if (err) goto err_mtt; err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length, convert_access(acc), mr); if (err) goto err_mtt; return &mr->ibmr; err_mtt: mthca_free_mtt(dev, mr->mtt); err_umem: ib_umem_release(mr->umem); err: kfree(mr); return ERR_PTR(err); } static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata) { struct mthca_mr *mmr = to_mmr(mr); mthca_free_mr(to_mdev(mr->device), mmr); ib_umem_release(mmr->umem); kfree(mmr); return 0; } static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = rdma_device_to_drv_device(device, struct mthca_dev, ib_dev); return sysfs_emit(buf, "%x\n", dev->rev_id); } static DEVICE_ATTR_RO(hw_rev); static const char *hca_type_string(int hca_type) { switch (hca_type) { case PCI_DEVICE_ID_MELLANOX_TAVOR: return "MT23108"; case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT: return "MT25208 (MT23108 compat mode)"; case PCI_DEVICE_ID_MELLANOX_ARBEL: return "MT25208"; case PCI_DEVICE_ID_MELLANOX_SINAI: case PCI_DEVICE_ID_MELLANOX_SINAI_OLD: return "MT25204"; } return "unknown"; } static ssize_t hca_type_show(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = rdma_device_to_drv_device(device, struct mthca_dev, ib_dev); return sysfs_emit(buf, "%s\n", hca_type_string(dev->pdev->device)); } static DEVICE_ATTR_RO(hca_type); static ssize_t board_id_show(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = rdma_device_to_drv_device(device, struct mthca_dev, ib_dev); return sysfs_emit(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id); } static DEVICE_ATTR_RO(board_id); static struct attribute *mthca_dev_attributes[] = { &dev_attr_hw_rev.attr, &dev_attr_hca_type.attr, &dev_attr_board_id.attr, NULL }; static const struct attribute_group mthca_attr_group = { .attrs = mthca_dev_attributes, }; static int mthca_init_node_data(struct mthca_dev *dev) { struct ib_smp *in_mad; struct ib_smp *out_mad; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; err = mthca_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); if (err) goto out; memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mthca_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); if (err) goto out; if (mthca_is_memfree(dev)) dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; int err; immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; err = ib_query_port(ibdev, port_num, &attr); if (err) return err; immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; immutable->max_mad_size = IB_MGMT_MAD_SIZE; return 0; } static void get_dev_fw_str(struct ib_device *device, char *str) { struct mthca_dev *dev = container_of(device, struct mthca_dev, ib_dev); snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d", (int) (dev->fw_ver >> 32), (int) (dev->fw_ver 
>> 16) & 0xffff, (int) dev->fw_ver & 0xffff); } static const struct ib_device_ops mthca_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_MTHCA, .uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION, .uverbs_no_driver_id_binding = 1, .alloc_pd = mthca_alloc_pd, .alloc_ucontext = mthca_alloc_ucontext, .attach_mcast = mthca_multicast_attach, .create_ah = mthca_ah_create, .create_cq = mthca_create_cq, .create_qp = mthca_create_qp, .dealloc_pd = mthca_dealloc_pd, .dealloc_ucontext = mthca_dealloc_ucontext, .dereg_mr = mthca_dereg_mr, .destroy_ah = mthca_ah_destroy, .destroy_cq = mthca_destroy_cq, .destroy_qp = mthca_destroy_qp, .detach_mcast = mthca_multicast_detach, .device_group = &mthca_attr_group, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = mthca_get_dma_mr, .get_port_immutable = mthca_port_immutable, .mmap = mthca_mmap_uar, .modify_device = mthca_modify_device, .modify_port = mthca_modify_port, .modify_qp = mthca_modify_qp, .poll_cq = mthca_poll_cq, .process_mad = mthca_process_mad, .query_ah = mthca_ah_query, .query_device = mthca_query_device, .query_gid = mthca_query_gid, .query_pkey = mthca_query_pkey, .query_port = mthca_query_port, .query_qp = mthca_query_qp, .reg_user_mr = mthca_reg_user_mr, .resize_cq = mthca_resize_cq, INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_qp, mthca_qp, ibqp), INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext), }; static const struct ib_device_ops mthca_dev_arbel_srq_ops = { .create_srq = mthca_create_srq, .destroy_srq = mthca_destroy_srq, .modify_srq = mthca_modify_srq, .post_srq_recv = mthca_arbel_post_srq_recv, .query_srq = mthca_query_srq, INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq), }; static const struct ib_device_ops mthca_dev_tavor_srq_ops = { .create_srq = mthca_create_srq, .destroy_srq = mthca_destroy_srq, .modify_srq = mthca_modify_srq, .post_srq_recv = mthca_tavor_post_srq_recv, .query_srq = mthca_query_srq, INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq), }; static const struct ib_device_ops mthca_dev_arbel_ops = { .post_recv = mthca_arbel_post_receive, .post_send = mthca_arbel_post_send, .req_notify_cq = mthca_arbel_arm_cq, }; static const struct ib_device_ops mthca_dev_tavor_ops = { .post_recv = mthca_tavor_post_receive, .post_send = mthca_tavor_post_send, .req_notify_cq = mthca_tavor_arm_cq, }; int mthca_register_device(struct mthca_dev *dev) { int ret; ret = mthca_init_node_data(dev); if (ret) return ret; dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; dev->ib_dev.num_comp_vectors = 1; dev->ib_dev.dev.parent = &dev->pdev->dev; if (dev->mthca_flags & MTHCA_FLAG_SRQ) { if (mthca_is_memfree(dev)) ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_srq_ops); else ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_srq_ops); } ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops); if (mthca_is_memfree(dev)) ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops); else ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops); mutex_init(&dev->cap_mask_mutex); ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev); if (ret) return ret; mthca_start_catas_poll(dev); return 0; } void mthca_unregister_device(struct mthca_dev *dev) { mthca_stop_catas_poll(dev); ib_unregister_device(&dev->ib_dev); }
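mthca_reg_user_mr() above batches DMA block addresses into page-sized chunks before each call to mthca_write_mtt(). A userspace sketch of that accumulate-and-flush pattern follows; the chunk size, the fake addresses, and the write_chunk() helper are stand-ins.

#include <stdint.h>
#include <stdio.h>

#define CHUNK 4	/* stand-in for write_mtt_size in mthca_reg_user_mr() */

/* Stand-in for mthca_write_mtt(): just report the chunk being flushed. */
static int write_chunk(int start_index, const uint64_t *pages, int count)
{
	printf("writing %d entries at MTT index %d (first page 0x%llx)\n",
	       count, start_index, (unsigned long long)pages[0]);
	return 0;
}

int main(void)
{
	uint64_t pages[CHUNK];
	int i = 0, n = 0, err = 0;

	/* Pretend each 4 KiB block of a 10-block region yields one address. */
	for (int blk = 0; blk < 10 && !err; blk++) {
		pages[i++] = 0x100000ull + 4096ull * blk;

		if (i == CHUNK) {		/* buffer full: flush a chunk */
			err = write_chunk(n, pages, i);
			n += i;
			i = 0;
		}
	}
	if (!err && i)				/* flush the partial tail */
		err = write_chunk(n, pages, i);
	return err;
}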
linux-master
drivers/infiniband/hw/mthca/mthca_provider.c
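mthca_create_cq() above sizes the CQ ring to the smallest power of two strictly greater than the requested entry count, and mthca_resize_cq() reports one less than the ring size as the usable CQE count. A small sketch of that rounding, with the same empty-bodied loop style:

#include <stdio.h>

/*
 * Smallest power of two strictly greater than n, mirroring the
 * "for (nent = 1; nent <= entries; nent <<= 1);" loop in mthca_create_cq().
 */
static int next_pow2_above(int n)
{
	int nent;

	for (nent = 1; nent <= n; nent <<= 1)
		; /* nothing */
	return nent;
}

int main(void)
{
	for (int entries = 1; entries <= 9; entries++)
		printf("requested %d -> ring %d, usable %d\n",
		       entries, next_pow2_above(entries),
		       next_pow2_above(entries) - 1);
	return 0;
}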
/* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/bug.h> #include <linux/errno.h> #include <linux/spinlock.h> #include "usnic_log.h" #include "usnic_vnic.h" #include "usnic_fwd.h" #include "usnic_uiom.h" #include "usnic_debugfs.h" #include "usnic_ib_qp_grp.h" #include "usnic_ib_sysfs.h" #include "usnic_transport.h" #define DFLT_RQ_IDX 0 const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state) { switch (state) { case IB_QPS_RESET: return "Rst"; case IB_QPS_INIT: return "Init"; case IB_QPS_RTR: return "RTR"; case IB_QPS_RTS: return "RTS"; case IB_QPS_SQD: return "SQD"; case IB_QPS_SQE: return "SQE"; case IB_QPS_ERR: return "ERR"; default: return "UNKNOWN STATE"; } } int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz) { return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID"); } int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz) { struct usnic_ib_qp_grp *qp_grp = obj; struct usnic_ib_qp_grp_flow *default_flow; if (obj) { default_flow = list_first_entry(&qp_grp->flows_lst, struct usnic_ib_qp_grp_flow, link); return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d", qp_grp->ibqp.qp_num, usnic_ib_qp_grp_state_to_string( qp_grp->state), qp_grp->owner_pid, usnic_vnic_get_index(qp_grp->vf->vnic), default_flow->flow->flow_id); } else { return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A"); } } static struct usnic_vnic_res_chunk * get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp) { lockdep_assert_held(&qp_grp->lock); /* * The QP res chunk, used to derive qp indices, * are just indices of the RQs */ return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ); } static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp) { int status; int i, vnic_idx; struct usnic_vnic_res_chunk *res_chunk; struct usnic_vnic_res *res; lockdep_assert_held(&qp_grp->lock); vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); res_chunk = get_qp_res_chunk(qp_grp); if (IS_ERR(res_chunk)) { usnic_err("Unable to get qp res with err %ld\n", PTR_ERR(res_chunk)); return PTR_ERR(res_chunk); } for (i = 0; i < res_chunk->cnt; i++) { res = res_chunk->res[i]; status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx, res->vnic_idx); if (status) { usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n", res->vnic_idx, 
qp_grp->ufdev->name, vnic_idx, status); goto out_err; } } return 0; out_err: for (i--; i >= 0; i--) { res = res_chunk->res[i]; usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx, res->vnic_idx); } return status; } static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp) { int i, vnic_idx; struct usnic_vnic_res_chunk *res_chunk; struct usnic_vnic_res *res; int status = 0; lockdep_assert_held(&qp_grp->lock); vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); res_chunk = get_qp_res_chunk(qp_grp); if (IS_ERR(res_chunk)) { usnic_err("Unable to get qp res with err %ld\n", PTR_ERR(res_chunk)); return PTR_ERR(res_chunk); } for (i = 0; i < res_chunk->cnt; i++) { res = res_chunk->res[i]; status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx, res->vnic_idx); if (status) { usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n", res->vnic_idx, qp_grp->ufdev->name, vnic_idx, status); } } return status; } static int init_filter_action(struct usnic_ib_qp_grp *qp_grp, struct usnic_filter_action *uaction) { struct usnic_vnic_res_chunk *res_chunk; res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ); if (IS_ERR(res_chunk)) { usnic_err("Unable to get %s with err %ld\n", usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ), PTR_ERR(res_chunk)); return PTR_ERR(res_chunk); } uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); uaction->action.type = FILTER_ACTION_RQ_STEERING; uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx; return 0; } static struct usnic_ib_qp_grp_flow* create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp, struct usnic_transport_spec *trans_spec) { uint16_t port_num; int err; struct filter filter; struct usnic_filter_action uaction; struct usnic_ib_qp_grp_flow *qp_flow; struct usnic_fwd_flow *flow; enum usnic_transport_type trans_type; trans_type = trans_spec->trans_type; port_num = trans_spec->usnic_roce.port_num; /* Reserve Port */ port_num = usnic_transport_rsrv_port(trans_type, port_num); if (port_num == 0) return ERR_PTR(-EINVAL); /* Create Flow */ usnic_fwd_init_usnic_filter(&filter, port_num); err = init_filter_action(qp_grp, &uaction); if (err) goto out_unreserve_port; flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction); if (IS_ERR_OR_NULL(flow)) { err = flow ? 
PTR_ERR(flow) : -EFAULT; goto out_unreserve_port; } /* Create Flow Handle */ qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC); if (!qp_flow) { err = -ENOMEM; goto out_dealloc_flow; } qp_flow->flow = flow; qp_flow->trans_type = trans_type; qp_flow->usnic_roce.port_num = port_num; qp_flow->qp_grp = qp_grp; return qp_flow; out_dealloc_flow: usnic_fwd_dealloc_flow(flow); out_unreserve_port: usnic_transport_unrsrv_port(trans_type, port_num); return ERR_PTR(err); } static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow) { usnic_fwd_dealloc_flow(qp_flow->flow); usnic_transport_unrsrv_port(qp_flow->trans_type, qp_flow->usnic_roce.port_num); kfree(qp_flow); } static struct usnic_ib_qp_grp_flow* create_udp_flow(struct usnic_ib_qp_grp *qp_grp, struct usnic_transport_spec *trans_spec) { struct socket *sock; int sock_fd; int err; struct filter filter; struct usnic_filter_action uaction; struct usnic_ib_qp_grp_flow *qp_flow; struct usnic_fwd_flow *flow; enum usnic_transport_type trans_type; uint32_t addr; uint16_t port_num; int proto; trans_type = trans_spec->trans_type; sock_fd = trans_spec->udp.sock_fd; /* Get and check socket */ sock = usnic_transport_get_socket(sock_fd); if (IS_ERR_OR_NULL(sock)) return ERR_CAST(sock); err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num); if (err) goto out_put_sock; if (proto != IPPROTO_UDP) { usnic_err("Protocol for fd %d is not UDP", sock_fd); err = -EPERM; goto out_put_sock; } /* Create flow */ usnic_fwd_init_udp_filter(&filter, addr, port_num); err = init_filter_action(qp_grp, &uaction); if (err) goto out_put_sock; flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction); if (IS_ERR_OR_NULL(flow)) { err = flow ? PTR_ERR(flow) : -EFAULT; goto out_put_sock; } /* Create qp_flow */ qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC); if (!qp_flow) { err = -ENOMEM; goto out_dealloc_flow; } qp_flow->flow = flow; qp_flow->trans_type = trans_type; qp_flow->udp.sock = sock; qp_flow->qp_grp = qp_grp; return qp_flow; out_dealloc_flow: usnic_fwd_dealloc_flow(flow); out_put_sock: usnic_transport_put_socket(sock); return ERR_PTR(err); } static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow) { usnic_fwd_dealloc_flow(qp_flow->flow); usnic_transport_put_socket(qp_flow->udp.sock); kfree(qp_flow); } static struct usnic_ib_qp_grp_flow* create_and_add_flow(struct usnic_ib_qp_grp *qp_grp, struct usnic_transport_spec *trans_spec) { struct usnic_ib_qp_grp_flow *qp_flow; enum usnic_transport_type trans_type; trans_type = trans_spec->trans_type; switch (trans_type) { case USNIC_TRANSPORT_ROCE_CUSTOM: qp_flow = create_roce_custom_flow(qp_grp, trans_spec); break; case USNIC_TRANSPORT_IPV4_UDP: qp_flow = create_udp_flow(qp_grp, trans_spec); break; default: usnic_err("Unsupported transport %u\n", trans_spec->trans_type); return ERR_PTR(-EINVAL); } if (!IS_ERR_OR_NULL(qp_flow)) { list_add_tail(&qp_flow->link, &qp_grp->flows_lst); usnic_debugfs_flow_add(qp_flow); } return qp_flow; } static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow) { usnic_debugfs_flow_remove(qp_flow); list_del(&qp_flow->link); switch (qp_flow->trans_type) { case USNIC_TRANSPORT_ROCE_CUSTOM: release_roce_custom_flow(qp_flow); break; case USNIC_TRANSPORT_IPV4_UDP: release_udp_flow(qp_flow); break; default: WARN(1, "Unsupported transport %u\n", qp_flow->trans_type); break; } } static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp) { struct usnic_ib_qp_grp_flow *qp_flow, *tmp; list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link) 
release_and_remove_flow(qp_flow); } int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp, enum ib_qp_state new_state, void *data) { int status = 0; struct ib_event ib_event; enum ib_qp_state old_state; struct usnic_transport_spec *trans_spec; struct usnic_ib_qp_grp_flow *qp_flow; old_state = qp_grp->state; trans_spec = (struct usnic_transport_spec *) data; spin_lock(&qp_grp->lock); switch (new_state) { case IB_QPS_RESET: switch (old_state) { case IB_QPS_RESET: /* NO-OP */ break; case IB_QPS_INIT: release_and_remove_all_flows(qp_grp); status = 0; break; case IB_QPS_RTR: case IB_QPS_RTS: case IB_QPS_ERR: status = disable_qp_grp(qp_grp); release_and_remove_all_flows(qp_grp); break; default: status = -EINVAL; } break; case IB_QPS_INIT: switch (old_state) { case IB_QPS_RESET: if (trans_spec) { qp_flow = create_and_add_flow(qp_grp, trans_spec); if (IS_ERR_OR_NULL(qp_flow)) { status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT; break; } } else { /* * Optional to specify filters. */ status = 0; } break; case IB_QPS_INIT: if (trans_spec) { qp_flow = create_and_add_flow(qp_grp, trans_spec); if (IS_ERR_OR_NULL(qp_flow)) { status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT; break; } } else { /* * Doesn't make sense to go into INIT state * from INIT state w/o adding filters. */ status = -EINVAL; } break; case IB_QPS_RTR: status = disable_qp_grp(qp_grp); break; case IB_QPS_RTS: status = disable_qp_grp(qp_grp); break; default: status = -EINVAL; } break; case IB_QPS_RTR: switch (old_state) { case IB_QPS_INIT: status = enable_qp_grp(qp_grp); break; default: status = -EINVAL; } break; case IB_QPS_RTS: switch (old_state) { case IB_QPS_RTR: /* NO-OP FOR NOW */ break; default: status = -EINVAL; } break; case IB_QPS_ERR: ib_event.device = &qp_grp->vf->pf->ib_dev; ib_event.element.qp = &qp_grp->ibqp; ib_event.event = IB_EVENT_QP_FATAL; switch (old_state) { case IB_QPS_RESET: qp_grp->ibqp.event_handler(&ib_event, qp_grp->ibqp.qp_context); break; case IB_QPS_INIT: release_and_remove_all_flows(qp_grp); qp_grp->ibqp.event_handler(&ib_event, qp_grp->ibqp.qp_context); break; case IB_QPS_RTR: case IB_QPS_RTS: status = disable_qp_grp(qp_grp); release_and_remove_all_flows(qp_grp); qp_grp->ibqp.event_handler(&ib_event, qp_grp->ibqp.qp_context); break; default: status = -EINVAL; } break; default: status = -EINVAL; } spin_unlock(&qp_grp->lock); if (!status) { qp_grp->state = new_state; usnic_info("Transitioned %u from %s to %s", qp_grp->grp_id, usnic_ib_qp_grp_state_to_string(old_state), usnic_ib_qp_grp_state_to_string(new_state)); } else { usnic_err("Failed to transition %u from %s to %s", qp_grp->grp_id, usnic_ib_qp_grp_state_to_string(old_state), usnic_ib_qp_grp_state_to_string(new_state)); } return status; } static struct usnic_vnic_res_chunk** alloc_res_chunk_list(struct usnic_vnic *vnic, struct usnic_vnic_res_spec *res_spec, void *owner_obj) { enum usnic_vnic_res_type res_type; struct usnic_vnic_res_chunk **res_chunk_list; int err, i, res_cnt, res_lst_sz; for (res_lst_sz = 0; res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL; res_lst_sz++) { /* Do Nothing */ } res_chunk_list = kcalloc(res_lst_sz + 1, sizeof(*res_chunk_list), GFP_ATOMIC); if (!res_chunk_list) return ERR_PTR(-ENOMEM); for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL; i++) { res_type = res_spec->resources[i].type; res_cnt = res_spec->resources[i].cnt; res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type, res_cnt, owner_obj); if (IS_ERR_OR_NULL(res_chunk_list[i])) { err = res_chunk_list[i] ? 
PTR_ERR(res_chunk_list[i]) : -ENOMEM; usnic_err("Failed to get %s from %s with err %d\n", usnic_vnic_res_type_to_str(res_type), usnic_vnic_pci_name(vnic), err); goto out_free_res; } } return res_chunk_list; out_free_res: for (i--; i >= 0; i--) usnic_vnic_put_resources(res_chunk_list[i]); kfree(res_chunk_list); return ERR_PTR(err); } static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list) { int i; for (i = 0; res_chunk_list[i]; i++) usnic_vnic_put_resources(res_chunk_list[i]); kfree(res_chunk_list); } static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf, struct usnic_ib_pd *pd, struct usnic_ib_qp_grp *qp_grp) { int err; struct pci_dev *pdev; lockdep_assert_held(&vf->lock); pdev = usnic_vnic_get_pdev(vf->vnic); if (vf->qp_grp_ref_cnt == 0) { err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev); if (err) { usnic_err("Failed to attach %s to domain\n", pci_name(pdev)); return err; } vf->pd = pd; } vf->qp_grp_ref_cnt++; WARN_ON(vf->pd != pd); qp_grp->vf = vf; return 0; } static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp) { struct pci_dev *pdev; struct usnic_ib_pd *pd; lockdep_assert_held(&qp_grp->vf->lock); pd = qp_grp->vf->pd; pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic); if (--qp_grp->vf->qp_grp_ref_cnt == 0) { qp_grp->vf->pd = NULL; usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev); } qp_grp->vf = NULL; } static void log_spec(struct usnic_vnic_res_spec *res_spec) { char buf[512]; usnic_vnic_spec_dump(buf, sizeof(buf), res_spec); usnic_dbg("%s\n", buf); } static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow, uint32_t *id) { enum usnic_transport_type trans_type = qp_flow->trans_type; int err; uint16_t port_num = 0; switch (trans_type) { case USNIC_TRANSPORT_ROCE_CUSTOM: *id = qp_flow->usnic_roce.port_num; break; case USNIC_TRANSPORT_IPV4_UDP: err = usnic_transport_sock_get_addr(qp_flow->udp.sock, NULL, NULL, &port_num); if (err) return err; /* * Copy port_num to stack first and then to *id, * so that the short to int cast works for little * and big endian systems. */ *id = port_num; break; default: usnic_err("Unsupported transport %u\n", trans_type); return -EINVAL; } return 0; } int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp_grp, struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, struct usnic_ib_pd *pd, struct usnic_vnic_res_spec *res_spec, struct usnic_transport_spec *transport_spec) { int err; enum usnic_transport_type transport = transport_spec->trans_type; struct usnic_ib_qp_grp_flow *qp_flow; lockdep_assert_held(&vf->lock); err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport], res_spec); if (err) { usnic_err("Spec does not meet minimum req for transport %d\n", transport); log_spec(res_spec); return err; } qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec, qp_grp); if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) return qp_grp->res_chunk_list ? PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM; err = qp_grp_and_vf_bind(vf, pd, qp_grp); if (err) goto out_free_res; INIT_LIST_HEAD(&qp_grp->flows_lst); spin_lock_init(&qp_grp->lock); qp_grp->ufdev = ufdev; qp_grp->state = IB_QPS_RESET; qp_grp->owner_pid = current->pid; qp_flow = create_and_add_flow(qp_grp, transport_spec); if (IS_ERR_OR_NULL(qp_flow)) { usnic_err("Unable to create and add flow with err %ld\n", PTR_ERR(qp_flow)); err = qp_flow ? 
PTR_ERR(qp_flow) : -EFAULT; goto out_qp_grp_vf_unbind; } err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id); if (err) goto out_release_flow; qp_grp->ibqp.qp_num = qp_grp->grp_id; usnic_ib_sysfs_qpn_add(qp_grp); return 0; out_release_flow: release_and_remove_flow(qp_flow); out_qp_grp_vf_unbind: qp_grp_and_vf_unbind(qp_grp); out_free_res: free_qp_grp_res(qp_grp->res_chunk_list); return err; } void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) { WARN_ON(qp_grp->state != IB_QPS_RESET); lockdep_assert_held(&qp_grp->vf->lock); release_and_remove_all_flows(qp_grp); usnic_ib_sysfs_qpn_remove(qp_grp); qp_grp_and_vf_unbind(qp_grp); free_qp_grp_res(qp_grp->res_chunk_list); } struct usnic_vnic_res_chunk* usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp, enum usnic_vnic_res_type res_type) { int i; for (i = 0; qp_grp->res_chunk_list[i]; i++) { if (qp_grp->res_chunk_list[i]->type == res_type) return qp_grp->res_chunk_list[i]; } return ERR_PTR(-EINVAL); }
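usnic_ib_qp_grp_get_chunk() above scans a NULL-terminated array of resource chunks for a given type, and alloc_res_chunk_list() builds that array from an EOL-terminated spec. A userspace sketch of the lookup, with stand-in types in place of the usnic_vnic.h definitions:

#include <stdio.h>
#include <stddef.h>

/* Stand-in resource types; the real ones come from usnic_vnic.h. */
enum res_type { RES_EOL = 0, RES_RQ, RES_WQ, RES_CQ };

struct res_chunk {
	enum res_type type;
	int cnt;
};

/*
 * Walk a NULL-terminated array of chunk pointers and return the chunk of
 * the requested type, as usnic_ib_qp_grp_get_chunk() does above.
 */
static struct res_chunk *get_chunk(struct res_chunk **list, enum res_type type)
{
	for (int i = 0; list[i]; i++)
		if (list[i]->type == type)
			return list[i];
	return NULL;
}

int main(void)
{
	struct res_chunk rq = { RES_RQ, 2 }, cq = { RES_CQ, 2 };
	struct res_chunk *list[] = { &rq, &cq, NULL };
	struct res_chunk *c = get_chunk(list, RES_CQ);

	printf("CQ chunk cnt: %d\n", c ? c->cnt : -1);
	return 0;
}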
linux-master
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
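The nested switches in usnic_ib_qp_grp_modify() above encode which QP state transitions the driver accepts. The sketch below condenses them into a lookup table; the enum values are stand-ins for the ib_qp_state subset the driver handles, and the INIT-to-INIT entry glosses over the requirement that a transport spec (filter) be supplied.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the subset of ib_qp_state values the driver handles. */
enum qp_state { S_RESET, S_INIT, S_RTR, S_RTS, S_ERR, S_MAX };

/*
 * valid[from][to], condensed from the nested switches in
 * usnic_ib_qp_grp_modify(). INIT -> INIT additionally requires filters.
 */
static const bool valid[S_MAX][S_MAX] = {
	/* to:        RESET  INIT   RTR    RTS    ERR  */
	[S_RESET] = { true,  true,  false, false, true  },
	[S_INIT]  = { true,  true,  true,  false, true  },
	[S_RTR]   = { true,  true,  false, true,  true  },
	[S_RTS]   = { true,  true,  false, false, true  },
	[S_ERR]   = { true,  false, false, false, false },
};

int main(void)
{
	printf("INIT  -> RTR: %d\n", valid[S_INIT][S_RTR]);
	printf("RESET -> RTS: %d\n", valid[S_RESET][S_RTS]);
	return 0;
}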
/* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Author: Upinder Malhi <[email protected]> * Author: Anant Deepak <[email protected]> * Author: Cesare Cantu' <[email protected]> * Author: Jeff Squyres <[email protected]> * Author: Kiran Thirumalai <[email protected]> * Author: Xuyang Wang <[email protected]> * Author: Reese Faucette <[email protected]> * */ #include <linux/module.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> #include "usnic_abi.h" #include "usnic_common_util.h" #include "usnic_ib.h" #include "usnic_ib_qp_grp.h" #include "usnic_log.h" #include "usnic_fwd.h" #include "usnic_debugfs.h" #include "usnic_ib_verbs.h" #include "usnic_transport.h" #include "usnic_uiom.h" #include "usnic_ib_sysfs.h" unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR; unsigned int usnic_ib_share_vf = 1; static const char usnic_version[] = DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; static DEFINE_MUTEX(usnic_ib_ibdev_list_lock); static LIST_HEAD(usnic_ib_ibdev_list); /* Callback dump funcs */ static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz) { struct usnic_ib_vf *vf = obj; return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev)); } /* End callback dump funcs */ static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz) { usnic_vnic_dump(vf->vnic, buf, buf_sz, vf, usnic_ib_dump_vf_hdr, usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows); } void usnic_ib_log_vf(struct usnic_ib_vf *vf) { char *buf = kzalloc(1000, GFP_KERNEL); if (!buf) return; usnic_ib_dump_vf(vf, buf, 1000); usnic_dbg("%s\n", buf); kfree(buf); } /* Start of netdev section */ static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev) { struct usnic_ib_ucontext *ctx; struct usnic_ib_qp_grp *qp_grp; enum ib_qp_state cur_state; int status; BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock)); list_for_each_entry(ctx, &us_ibdev->ctx_list, link) { list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) { cur_state = qp_grp->state; if (cur_state == IB_QPS_INIT || cur_state == IB_QPS_RTR || cur_state == IB_QPS_RTS) { status 
= usnic_ib_qp_grp_modify(qp_grp, IB_QPS_ERR, NULL); if (status) { usnic_err("Failed to transition qp grp %u from %s to %s\n", qp_grp->grp_id, usnic_ib_qp_grp_state_to_string (cur_state), usnic_ib_qp_grp_state_to_string (IB_QPS_ERR)); } } } } } static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev, unsigned long event) { struct net_device *netdev; struct ib_event ib_event; memset(&ib_event, 0, sizeof(ib_event)); mutex_lock(&us_ibdev->usdev_lock); netdev = us_ibdev->netdev; switch (event) { case NETDEV_REBOOT: usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev)); usnic_ib_qp_grp_modify_active_to_err(us_ibdev); ib_event.event = IB_EVENT_PORT_ERR; ib_event.device = &us_ibdev->ib_dev; ib_event.element.port_num = 1; ib_dispatch_event(&ib_event); break; case NETDEV_UP: case NETDEV_DOWN: case NETDEV_CHANGE: if (!us_ibdev->ufdev->link_up && netif_carrier_ok(netdev)) { usnic_fwd_carrier_up(us_ibdev->ufdev); usnic_info("Link UP on %s\n", dev_name(&us_ibdev->ib_dev.dev)); ib_event.event = IB_EVENT_PORT_ACTIVE; ib_event.device = &us_ibdev->ib_dev; ib_event.element.port_num = 1; ib_dispatch_event(&ib_event); } else if (us_ibdev->ufdev->link_up && !netif_carrier_ok(netdev)) { usnic_fwd_carrier_down(us_ibdev->ufdev); usnic_info("Link DOWN on %s\n", dev_name(&us_ibdev->ib_dev.dev)); usnic_ib_qp_grp_modify_active_to_err(us_ibdev); ib_event.event = IB_EVENT_PORT_ERR; ib_event.device = &us_ibdev->ib_dev; ib_event.element.port_num = 1; ib_dispatch_event(&ib_event); } else { usnic_dbg("Ignoring %s on %s\n", netdev_cmd_to_name(event), dev_name(&us_ibdev->ib_dev.dev)); } break; case NETDEV_CHANGEADDR: if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr, sizeof(us_ibdev->ufdev->mac))) { usnic_dbg("Ignoring addr change on %s\n", dev_name(&us_ibdev->ib_dev.dev)); } else { usnic_info(" %s old mac: %pM new mac: %pM\n", dev_name(&us_ibdev->ib_dev.dev), us_ibdev->ufdev->mac, netdev->dev_addr); usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr); usnic_ib_qp_grp_modify_active_to_err(us_ibdev); ib_event.event = IB_EVENT_GID_CHANGE; ib_event.device = &us_ibdev->ib_dev; ib_event.element.port_num = 1; ib_dispatch_event(&ib_event); } break; case NETDEV_CHANGEMTU: if (us_ibdev->ufdev->mtu != netdev->mtu) { usnic_info("MTU Change on %s old: %u new: %u\n", dev_name(&us_ibdev->ib_dev.dev), us_ibdev->ufdev->mtu, netdev->mtu); usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu); usnic_ib_qp_grp_modify_active_to_err(us_ibdev); } else { usnic_dbg("Ignoring MTU change on %s\n", dev_name(&us_ibdev->ib_dev.dev)); } break; default: usnic_dbg("Ignoring event %s on %s", netdev_cmd_to_name(event), dev_name(&us_ibdev->ib_dev.dev)); } mutex_unlock(&us_ibdev->usdev_lock); } static int usnic_ib_netdevice_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct usnic_ib_dev *us_ibdev; struct ib_device *ibdev; struct net_device *netdev = netdev_notifier_info_to_dev(ptr); ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC); if (!ibdev) return NOTIFY_DONE; us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev); usnic_ib_handle_usdev_event(us_ibdev, event); ib_device_put(ibdev); return NOTIFY_DONE; } static struct notifier_block usnic_ib_netdevice_notifier = { .notifier_call = usnic_ib_netdevice_event }; /* End of netdev section */ /* Start of inet section */ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev, unsigned long event, void *ptr) { struct in_ifaddr *ifa = ptr; struct ib_event ib_event; mutex_lock(&us_ibdev->usdev_lock); switch (event) { case NETDEV_DOWN: 
usnic_info("%s via ip notifiers", netdev_cmd_to_name(event)); usnic_fwd_del_ipaddr(us_ibdev->ufdev); usnic_ib_qp_grp_modify_active_to_err(us_ibdev); ib_event.event = IB_EVENT_GID_CHANGE; ib_event.device = &us_ibdev->ib_dev; ib_event.element.port_num = 1; ib_dispatch_event(&ib_event); break; case NETDEV_UP: usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address); usnic_info("%s via ip notifiers: ip %pI4", netdev_cmd_to_name(event), &us_ibdev->ufdev->inaddr); ib_event.event = IB_EVENT_GID_CHANGE; ib_event.device = &us_ibdev->ib_dev; ib_event.element.port_num = 1; ib_dispatch_event(&ib_event); break; default: usnic_info("Ignoring event %s on %s", netdev_cmd_to_name(event), dev_name(&us_ibdev->ib_dev.dev)); } mutex_unlock(&us_ibdev->usdev_lock); return NOTIFY_DONE; } static int usnic_ib_inetaddr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct usnic_ib_dev *us_ibdev; struct in_ifaddr *ifa = ptr; struct net_device *netdev = ifa->ifa_dev->dev; struct ib_device *ibdev; ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC); if (!ibdev) return NOTIFY_DONE; us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev); usnic_ib_handle_inet_event(us_ibdev, event, ptr); ib_device_put(ibdev); return NOTIFY_DONE; } static struct notifier_block usnic_ib_inetaddr_notifier = { .notifier_call = usnic_ib_inetaddr_event }; /* End of inet section*/ static int usnic_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; int err; immutable->core_cap_flags = RDMA_CORE_PORT_USNIC; err = ib_query_port(ibdev, port_num, &attr); if (err) return err; immutable->gid_tbl_len = attr.gid_tbl_len; return 0; } static void usnic_get_dev_fw_str(struct ib_device *device, char *str) { struct usnic_ib_dev *us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev); struct ethtool_drvinfo info; mutex_lock(&us_ibdev->usdev_lock); us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); mutex_unlock(&us_ibdev->usdev_lock); snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version); } static const struct ib_device_ops usnic_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_USNIC, .uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION, .alloc_pd = usnic_ib_alloc_pd, .alloc_ucontext = usnic_ib_alloc_ucontext, .create_cq = usnic_ib_create_cq, .create_qp = usnic_ib_create_qp, .dealloc_pd = usnic_ib_dealloc_pd, .dealloc_ucontext = usnic_ib_dealloc_ucontext, .dereg_mr = usnic_ib_dereg_mr, .destroy_cq = usnic_ib_destroy_cq, .destroy_qp = usnic_ib_destroy_qp, .device_group = &usnic_attr_group, .get_dev_fw_str = usnic_get_dev_fw_str, .get_link_layer = usnic_ib_port_link_layer, .get_port_immutable = usnic_port_immutable, .mmap = usnic_ib_mmap, .modify_qp = usnic_ib_modify_qp, .query_device = usnic_ib_query_device, .query_gid = usnic_ib_query_gid, .query_port = usnic_ib_query_port, .query_qp = usnic_ib_query_qp, .reg_user_mr = usnic_ib_reg_mr, INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp), INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext), }; /* Start of PF discovery section */ static void *usnic_ib_device_add(struct pci_dev *dev) { struct usnic_ib_dev *us_ibdev; union ib_gid gid; struct in_device *ind; struct net_device *netdev; int ret; usnic_dbg("\n"); netdev = pci_get_drvdata(dev); us_ibdev = ib_alloc_device(usnic_ib_dev, ib_dev); if (!us_ibdev) { usnic_err("Device %s context alloc failed\n", 
netdev_name(pci_get_drvdata(dev))); return ERR_PTR(-EFAULT); } us_ibdev->ufdev = usnic_fwd_dev_alloc(dev); if (!us_ibdev->ufdev) { usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev)); goto err_dealloc; } mutex_init(&us_ibdev->usdev_lock); INIT_LIST_HEAD(&us_ibdev->vf_dev_list); INIT_LIST_HEAD(&us_ibdev->ctx_list); us_ibdev->pdev = dev; us_ibdev->netdev = pci_get_drvdata(dev); us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP; us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT; us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS; us_ibdev->ib_dev.dev.parent = &dev->dev; ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops); ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1); if (ret) goto err_fwd_dealloc; dma_set_max_seg_size(&dev->dev, SZ_2G); if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", &dev->dev)) goto err_fwd_dealloc; usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu); usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr); if (netif_carrier_ok(us_ibdev->netdev)) usnic_fwd_carrier_up(us_ibdev->ufdev); rcu_read_lock(); ind = __in_dev_get_rcu(netdev); if (ind) { const struct in_ifaddr *ifa; ifa = rcu_dereference(ind->ifa_list); if (ifa) usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address); } rcu_read_unlock(); usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr, us_ibdev->ufdev->inaddr, &gid.raw[0]); memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id, sizeof(gid.global.interface_id)); kref_init(&us_ibdev->vf_cnt); usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n", dev_name(&us_ibdev->ib_dev.dev), netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu); return us_ibdev; err_fwd_dealloc: usnic_fwd_dev_free(us_ibdev->ufdev); err_dealloc: usnic_err("failed -- deallocing device\n"); ib_dealloc_device(&us_ibdev->ib_dev); return NULL; } static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev) { usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev)); usnic_ib_sysfs_unregister_usdev(us_ibdev); usnic_fwd_dev_free(us_ibdev->ufdev); ib_unregister_device(&us_ibdev->ib_dev); ib_dealloc_device(&us_ibdev->ib_dev); } static void usnic_ib_undiscover_pf(struct kref *kref) { struct usnic_ib_dev *us_ibdev, *tmp; struct pci_dev *dev; bool found = false; dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev; mutex_lock(&usnic_ib_ibdev_list_lock); list_for_each_entry_safe(us_ibdev, tmp, &usnic_ib_ibdev_list, ib_dev_link) { if (us_ibdev->pdev == dev) { list_del(&us_ibdev->ib_dev_link); found = true; break; } } mutex_unlock(&usnic_ib_ibdev_list_lock); if (found) usnic_ib_device_remove(us_ibdev); else WARN(1, "Failed to remove PF %s\n", pci_name(dev)); } static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic) { struct usnic_ib_dev *us_ibdev; struct pci_dev *parent_pci, *vf_pci; int err; vf_pci = usnic_vnic_get_pdev(vnic); parent_pci = pci_physfn(vf_pci); BUG_ON(!parent_pci); mutex_lock(&usnic_ib_ibdev_list_lock); list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) { if (us_ibdev->pdev == parent_pci) { kref_get(&us_ibdev->vf_cnt); goto out; } } us_ibdev = usnic_ib_device_add(parent_pci); if (IS_ERR_OR_NULL(us_ibdev)) { us_ibdev = us_ibdev ? 
us_ibdev : ERR_PTR(-EFAULT); goto out; } err = usnic_ib_sysfs_register_usdev(us_ibdev); if (err) { usnic_ib_device_remove(us_ibdev); us_ibdev = ERR_PTR(err); goto out; } list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list); out: mutex_unlock(&usnic_ib_ibdev_list_lock); return us_ibdev; } /* End of PF discovery section */ /* Start of PCI section */ static const struct pci_device_id usnic_ib_pci_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)}, {0,} }; static int usnic_ib_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err; struct usnic_ib_dev *pf; struct usnic_ib_vf *vf; enum usnic_vnic_res_type res_type; if (!device_iommu_mapped(&pdev->dev)) { usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n"); return -EPERM; } vf = kzalloc(sizeof(*vf), GFP_KERNEL); if (!vf) return -ENOMEM; err = pci_enable_device(pdev); if (err) { usnic_err("Failed to enable %s with err %d\n", pci_name(pdev), err); goto out_clean_vf; } err = pci_request_regions(pdev, DRV_NAME); if (err) { usnic_err("Failed to request region for %s with err %d\n", pci_name(pdev), err); goto out_disable_device; } pci_set_master(pdev); pci_set_drvdata(pdev, vf); vf->vnic = usnic_vnic_alloc(pdev); if (IS_ERR_OR_NULL(vf->vnic)) { err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM; usnic_err("Failed to alloc vnic for %s with err %d\n", pci_name(pdev), err); goto out_release_regions; } pf = usnic_ib_discover_pf(vf->vnic); if (IS_ERR_OR_NULL(pf)) { usnic_err("Failed to discover pf of vnic %s with err%ld\n", pci_name(pdev), PTR_ERR(pf)); err = pf ? PTR_ERR(pf) : -EFAULT; goto out_clean_vnic; } vf->pf = pf; mutex_init(&vf->lock); mutex_lock(&pf->usdev_lock); list_add_tail(&vf->link, &pf->vf_dev_list); /* * Save max settings (will be same for each VF, easier to re-write than * to say "if (!set) { set_values(); set=1; } */ for (res_type = USNIC_VNIC_RES_TYPE_EOL+1; res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) { pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic, res_type); } mutex_unlock(&pf->usdev_lock); usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev), dev_name(&pf->ib_dev.dev)); usnic_ib_log_vf(vf); return 0; out_clean_vnic: usnic_vnic_free(vf->vnic); out_release_regions: pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); out_clean_vf: kfree(vf); return err; } static void usnic_ib_pci_remove(struct pci_dev *pdev) { struct usnic_ib_vf *vf = pci_get_drvdata(pdev); struct usnic_ib_dev *pf = vf->pf; mutex_lock(&pf->usdev_lock); list_del(&vf->link); mutex_unlock(&pf->usdev_lock); kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf); usnic_vnic_free(vf->vnic); pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); pci_disable_device(pdev); kfree(vf); usnic_info("Removed VF %s\n", pci_name(pdev)); } /* PCI driver entry points */ static struct pci_driver usnic_ib_pci_driver = { .name = DRV_NAME, .id_table = usnic_ib_pci_ids, .probe = usnic_ib_pci_probe, .remove = usnic_ib_pci_remove, }; /* End of PCI section */ /* Start of module section */ static int __init usnic_ib_init(void) { int err; printk_once(KERN_INFO "%s", usnic_version); err = pci_register_driver(&usnic_ib_pci_driver); if (err) { usnic_err("Unable to register with PCI\n"); goto out_umem_fini; } err = register_netdevice_notifier(&usnic_ib_netdevice_notifier); if (err) { usnic_err("Failed to register netdev notifier\n"); goto out_pci_unreg; } err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier); if (err) { 
usnic_err("Failed to register inet addr notifier\n"); goto out_unreg_netdev_notifier; } err = usnic_transport_init(); if (err) { usnic_err("Failed to initialize transport\n"); goto out_unreg_inetaddr_notifier; } usnic_debugfs_init(); return 0; out_unreg_inetaddr_notifier: unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier); out_unreg_netdev_notifier: unregister_netdevice_notifier(&usnic_ib_netdevice_notifier); out_pci_unreg: pci_unregister_driver(&usnic_ib_pci_driver); out_umem_fini: return err; } static void __exit usnic_ib_destroy(void) { usnic_dbg("\n"); usnic_debugfs_exit(); usnic_transport_fini(); unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier); unregister_netdevice_notifier(&usnic_ib_netdevice_notifier); pci_unregister_driver(&usnic_ib_pci_driver); } MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver"); MODULE_AUTHOR("Upinder Malhi <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL"); module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR); module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3"); MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs"); MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids); module_init(usnic_ib_init); module_exit(usnic_ib_destroy); /* End of module section */
linux-master
drivers/infiniband/hw/usnic/usnic_ib_main.c
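The file above leans on the standard kernel netdevice-notifier pattern: a notifier_block is registered at module init, and its callback resolves each net_device event back to the owning ib_device before reacting. Below is a minimal sketch of just that pattern; the demo_* names are placeholders, the sketch only builds inside a kernel source tree, and it is not part of the usnic driver itself.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

/* Illustrative only: log every netdev event, mirroring the shape of
 * usnic_ib_netdevice_event() without the ib_device lookup.
 */
static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	pr_info("demo: %s on %s\n", netdev_cmd_to_name(event),
		netdev_name(netdev));
	return NOTIFY_DONE;
}

static struct notifier_block demo_netdev_nb = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_netdev_nb);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_netdev_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");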
/* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/init.h> #include <linux/errno.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> #include "usnic_common_util.h" #include "usnic_ib.h" #include "usnic_ib_qp_grp.h" #include "usnic_vnic.h" #include "usnic_ib_verbs.h" #include "usnic_ib_sysfs.h" #include "usnic_log.h" static ssize_t board_id_show(struct device *device, struct device_attribute *attr, char *buf) { struct usnic_ib_dev *us_ibdev = rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); unsigned short subsystem_device_id; mutex_lock(&us_ibdev->usdev_lock); subsystem_device_id = us_ibdev->pdev->subsystem_device; mutex_unlock(&us_ibdev->usdev_lock); return sysfs_emit(buf, "%u\n", subsystem_device_id); } static DEVICE_ATTR_RO(board_id); /* * Report the configuration for this PF */ static ssize_t config_show(struct device *device, struct device_attribute *attr, char *buf) { struct usnic_ib_dev *us_ibdev = rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); enum usnic_vnic_res_type res_type; int len; mutex_lock(&us_ibdev->usdev_lock); if (kref_read(&us_ibdev->vf_cnt) > 0) { char *busname; char *sep = ""; /* * bus name seems to come with annoying prefix. 
* Remove it if it is predictable */ busname = us_ibdev->pdev->bus->name; if (strncmp(busname, "PCI Bus ", 8) == 0) busname += 8; len = sysfs_emit(buf, "%s: %s:%d.%d, %s, %pM, %u VFs\n", dev_name(&us_ibdev->ib_dev.dev), busname, PCI_SLOT(us_ibdev->pdev->devfn), PCI_FUNC(us_ibdev->pdev->devfn), netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac, kref_read(&us_ibdev->vf_cnt)); len += sysfs_emit_at(buf, len, " Per VF:"); for (res_type = USNIC_VNIC_RES_TYPE_EOL; res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) { if (us_ibdev->vf_res_cnt[res_type] == 0) continue; len += sysfs_emit_at(buf, len, "%s %d %s", sep, us_ibdev->vf_res_cnt[res_type], usnic_vnic_res_type_to_str(res_type)); sep = ","; } len += sysfs_emit_at(buf, len, "\n"); } else { len = sysfs_emit(buf, "%s: no VFs\n", dev_name(&us_ibdev->ib_dev.dev)); } mutex_unlock(&us_ibdev->usdev_lock); return len; } static DEVICE_ATTR_RO(config); static ssize_t iface_show(struct device *device, struct device_attribute *attr, char *buf) { struct usnic_ib_dev *us_ibdev = rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); return sysfs_emit(buf, "%s\n", netdev_name(us_ibdev->netdev)); } static DEVICE_ATTR_RO(iface); static ssize_t max_vf_show(struct device *device, struct device_attribute *attr, char *buf) { struct usnic_ib_dev *us_ibdev = rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); return sysfs_emit(buf, "%u\n", kref_read(&us_ibdev->vf_cnt)); } static DEVICE_ATTR_RO(max_vf); static ssize_t qp_per_vf_show(struct device *device, struct device_attribute *attr, char *buf) { struct usnic_ib_dev *us_ibdev = rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); int qp_per_vf; qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ], us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]); return sysfs_emit(buf, "%d\n", qp_per_vf); } static DEVICE_ATTR_RO(qp_per_vf); static ssize_t cq_per_vf_show(struct device *device, struct device_attribute *attr, char *buf) { struct usnic_ib_dev *us_ibdev = rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev); return sysfs_emit(buf, "%d\n", us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]); } static DEVICE_ATTR_RO(cq_per_vf); static struct attribute *usnic_class_attributes[] = { &dev_attr_board_id.attr, &dev_attr_config.attr, &dev_attr_iface.attr, &dev_attr_max_vf.attr, &dev_attr_qp_per_vf.attr, &dev_attr_cq_per_vf.attr, NULL }; const struct attribute_group usnic_attr_group = { .attrs = usnic_class_attributes, }; struct qpn_attribute { struct attribute attr; ssize_t (*show)(struct usnic_ib_qp_grp *, char *buf); }; /* * Definitions for supporting QPN entries in sysfs */ static ssize_t usnic_ib_qpn_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct usnic_ib_qp_grp *qp_grp; struct qpn_attribute *qpn_attr; qp_grp = container_of(kobj, struct usnic_ib_qp_grp, kobj); qpn_attr = container_of(attr, struct qpn_attribute, attr); return qpn_attr->show(qp_grp, buf); } static const struct sysfs_ops usnic_ib_qpn_sysfs_ops = { .show = usnic_ib_qpn_attr_show }; #define QPN_ATTR_RO(NAME) \ struct qpn_attribute qpn_attr_##NAME = __ATTR_RO(NAME) static ssize_t context_show(struct usnic_ib_qp_grp *qp_grp, char *buf) { return sysfs_emit(buf, "0x%p\n", qp_grp->ctx); } static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf) { int i, j; struct usnic_vnic_res_chunk *res_chunk; struct usnic_vnic_res *vnic_res; int len; len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu", qp_grp->ibqp.qp_num, usnic_ib_qp_grp_state_to_string(qp_grp->state), qp_grp->owner_pid, 
usnic_vnic_get_index(qp_grp->vf->vnic)); for (i = 0; qp_grp->res_chunk_list[i]; i++) { res_chunk = qp_grp->res_chunk_list[i]; for (j = 0; j < res_chunk->cnt; j++) { vnic_res = res_chunk->res[j]; len += sysfs_emit_at(buf, len, " %s[%d]", usnic_vnic_res_type_to_str(vnic_res->type), vnic_res->vnic_idx); } } len += sysfs_emit_at(buf, len, "\n"); return len; } static QPN_ATTR_RO(context); static QPN_ATTR_RO(summary); static struct attribute *usnic_ib_qpn_default_attrs[] = { &qpn_attr_context.attr, &qpn_attr_summary.attr, NULL }; ATTRIBUTE_GROUPS(usnic_ib_qpn_default); static struct kobj_type usnic_ib_qpn_type = { .sysfs_ops = &usnic_ib_qpn_sysfs_ops, .default_groups = usnic_ib_qpn_default_groups, }; int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev) { /* create kernel object for looking at individual QPs */ kobject_get(&us_ibdev->ib_dev.dev.kobj); us_ibdev->qpn_kobj = kobject_create_and_add("qpn", &us_ibdev->ib_dev.dev.kobj); if (us_ibdev->qpn_kobj == NULL) { kobject_put(&us_ibdev->ib_dev.dev.kobj); return -ENOMEM; } return 0; } void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev) { kobject_put(us_ibdev->qpn_kobj); } void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp) { struct usnic_ib_dev *us_ibdev; int err; us_ibdev = qp_grp->vf->pf; err = kobject_init_and_add(&qp_grp->kobj, &usnic_ib_qpn_type, kobject_get(us_ibdev->qpn_kobj), "%d", qp_grp->grp_id); if (err) { kobject_put(us_ibdev->qpn_kobj); return; } } void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp) { struct usnic_ib_dev *us_ibdev; us_ibdev = qp_grp->vf->pf; kobject_put(&qp_grp->kobj); kobject_put(us_ibdev->qpn_kobj); }
linux-master
drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
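The sysfs file above is built almost entirely from the usual show-callback plus attribute-group idiom (DEVICE_ATTR_RO, sysfs_emit, a NULL-terminated attribute array). A stripped-down sketch of that idiom follows; demo_value is a made-up attribute used purely for illustration, and the snippet assumes a kernel build environment.

#include <linux/device.h>
#include <linux/sysfs.h>

/* One read-only attribute: the show callback formats into the single
 * page sysfs provides, and sysfs_emit() enforces the bound.
 */
static ssize_t demo_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(demo_value);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_value.attr,
	NULL,			/* array must be NULL terminated */
};

/* Exposed in one shot, e.g. via an ib_device_ops .device_group hook,
 * the way usnic_attr_group is wired up.
 */
const struct attribute_group demo_attr_group = {
	.attrs = demo_attrs,
};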
/* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/debugfs.h> #include "usnic.h" #include "usnic_log.h" #include "usnic_debugfs.h" #include "usnic_ib_qp_grp.h" #include "usnic_transport.h" static struct dentry *debugfs_root; static struct dentry *flows_dentry; static ssize_t usnic_debugfs_buildinfo_read(struct file *f, char __user *data, size_t count, loff_t *ppos) { char buf[500]; int res; if (*ppos > 0) return 0; res = scnprintf(buf, sizeof(buf), "version: %s\n" "build date: %s\n", DRV_VERSION, DRV_RELDATE); return simple_read_from_buffer(data, count, ppos, buf, res); } static const struct file_operations usnic_debugfs_buildinfo_ops = { .owner = THIS_MODULE, .open = simple_open, .read = usnic_debugfs_buildinfo_read }; static ssize_t flowinfo_read(struct file *f, char __user *data, size_t count, loff_t *ppos) { struct usnic_ib_qp_grp_flow *qp_flow; int n; int left; char *ptr; char buf[512]; qp_flow = f->private_data; ptr = buf; left = count; if (*ppos > 0) return 0; spin_lock(&qp_flow->qp_grp->lock); n = scnprintf(ptr, left, "QP Grp ID: %d Transport: %s ", qp_flow->qp_grp->grp_id, usnic_transport_to_str(qp_flow->trans_type)); UPDATE_PTR_LEFT(n, ptr, left); if (qp_flow->trans_type == USNIC_TRANSPORT_ROCE_CUSTOM) { n = scnprintf(ptr, left, "Port_Num:%hu\n", qp_flow->usnic_roce.port_num); UPDATE_PTR_LEFT(n, ptr, left); } else if (qp_flow->trans_type == USNIC_TRANSPORT_IPV4_UDP) { n = usnic_transport_sock_to_str(ptr, left, qp_flow->udp.sock); UPDATE_PTR_LEFT(n, ptr, left); n = scnprintf(ptr, left, "\n"); UPDATE_PTR_LEFT(n, ptr, left); } spin_unlock(&qp_flow->qp_grp->lock); return simple_read_from_buffer(data, count, ppos, buf, ptr - buf); } static const struct file_operations flowinfo_ops = { .owner = THIS_MODULE, .open = simple_open, .read = flowinfo_read, }; void usnic_debugfs_init(void) { debugfs_root = debugfs_create_dir(DRV_NAME, NULL); flows_dentry = debugfs_create_dir("flows", debugfs_root); debugfs_create_file("build-info", S_IRUGO, debugfs_root, NULL, &usnic_debugfs_buildinfo_ops); } void usnic_debugfs_exit(void) { debugfs_remove_recursive(debugfs_root); debugfs_root = NULL; } void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow) { scnprintf(qp_flow->dentry_name, sizeof(qp_flow->dentry_name), "%u", 
qp_flow->flow->flow_id); qp_flow->dbgfs_dentry = debugfs_create_file(qp_flow->dentry_name, S_IRUGO, flows_dentry, qp_flow, &flowinfo_ops); } void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow) { debugfs_remove(qp_flow->dbgfs_dentry); }
linux-master
drivers/infiniband/hw/usnic/usnic_debugfs.c
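The debugfs code above follows the common simple_open plus simple_read_from_buffer recipe: format a small buffer on each read and let the helper handle offsets and partial reads. Below is a minimal module-style sketch of the same recipe; the usnic_demo directory name and demo_* identifiers are invented for the example, and the sketch assumes a kernel build tree.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

static struct dentry *demo_root;

static ssize_t demo_info_read(struct file *f, char __user *data,
			      size_t count, loff_t *ppos)
{
	char buf[64];
	int len = scnprintf(buf, sizeof(buf), "hello from debugfs\n");

	/* handles partial reads and EOF bookkeeping via *ppos */
	return simple_read_from_buffer(data, count, ppos, buf, len);
}

static const struct file_operations demo_info_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = demo_info_read,
};

static int __init demo_init(void)
{
	demo_root = debugfs_create_dir("usnic_demo", NULL);
	debugfs_create_file("info", 0444, demo_root, NULL, &demo_info_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");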
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2013 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/hugetlb.h> #include <linux/iommu.h> #include <linux/workqueue.h> #include <linux/list.h> #include <rdma/ib_verbs.h> #include "usnic_log.h" #include "usnic_uiom.h" #include "usnic_uiom_interval_tree.h" #define USNIC_UIOM_PAGE_CHUNK \ ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\ ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \ (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0])) static int usnic_uiom_dma_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *token) { usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n", dev_name(dev), domain, iova, flags); return -ENOSYS; } static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty) { struct usnic_uiom_chunk *chunk, *tmp; struct page *page; struct scatterlist *sg; int i; dma_addr_t pa; list_for_each_entry_safe(chunk, tmp, chunk_list, list) { for_each_sg(chunk->page_list, sg, chunk->nents, i) { page = sg_page(sg); pa = sg_phys(sg); unpin_user_pages_dirty_lock(&page, 1, dirty); usnic_dbg("pa: %pa\n", &pa); } kfree(chunk); } } static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, int dmasync, struct usnic_uiom_reg *uiomr) { struct list_head *chunk_list = &uiomr->chunk_list; unsigned int gup_flags = FOLL_LONGTERM; struct page **page_list; struct scatterlist *sg; struct usnic_uiom_chunk *chunk; unsigned long locked; unsigned long lock_limit; unsigned long cur_base; unsigned long npages; int ret; int off; int i; dma_addr_t pa; struct mm_struct *mm; /* * If the combination of the addr and size requested for this memory * region causes an integer overflow, return error. 
*/ if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size)) return -EINVAL; if (!size) return -EINVAL; if (!can_do_mlock()) return -EPERM; INIT_LIST_HEAD(chunk_list); page_list = (struct page **) __get_free_page(GFP_KERNEL); if (!page_list) return -ENOMEM; npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; uiomr->owning_mm = mm = current->mm; mmap_read_lock(mm); locked = atomic64_add_return(npages, &current->mm->pinned_vm); lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { ret = -ENOMEM; goto out; } if (writable) gup_flags |= FOLL_WRITE; cur_base = addr & PAGE_MASK; ret = 0; while (npages) { ret = pin_user_pages(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), gup_flags, page_list); if (ret < 0) goto out; npages -= ret; off = 0; while (ret) { chunk = kmalloc(struct_size(chunk, page_list, min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)), GFP_KERNEL); if (!chunk) { ret = -ENOMEM; goto out; } chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK); sg_init_table(chunk->page_list, chunk->nents); for_each_sg(chunk->page_list, sg, chunk->nents, i) { sg_set_page(sg, page_list[i + off], PAGE_SIZE, 0); pa = sg_phys(sg); usnic_dbg("va: 0x%lx pa: %pa\n", cur_base + i*PAGE_SIZE, &pa); } cur_base += chunk->nents * PAGE_SIZE; ret -= chunk->nents; off += chunk->nents; list_add_tail(&chunk->list, chunk_list); } ret = 0; } out: if (ret < 0) { usnic_uiom_put_pages(chunk_list, 0); atomic64_sub(npages, &current->mm->pinned_vm); } else mmgrab(uiomr->owning_mm); mmap_read_unlock(mm); free_page((unsigned long) page_list); return ret; } static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals, struct usnic_uiom_pd *pd) { struct usnic_uiom_interval_node *interval, *tmp; long unsigned va, size; list_for_each_entry_safe(interval, tmp, intervals, link) { va = interval->start << PAGE_SHIFT; size = ((interval->last - interval->start) + 1) << PAGE_SHIFT; while (size > 0) { /* Workaround for RH 970401 */ usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE); iommu_unmap(pd->domain, va, PAGE_SIZE); va += PAGE_SIZE; size -= PAGE_SIZE; } } } static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd, struct usnic_uiom_reg *uiomr, int dirty) { int npages; unsigned long vpn_start, vpn_last; struct usnic_uiom_interval_node *interval, *tmp; int writable = 0; LIST_HEAD(rm_intervals); npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT; vpn_last = vpn_start + npages - 1; spin_lock(&pd->lock); usnic_uiom_remove_interval(&pd->root, vpn_start, vpn_last, &rm_intervals); usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd); list_for_each_entry_safe(interval, tmp, &rm_intervals, link) { if (interval->flags & IOMMU_WRITE) writable = 1; list_del(&interval->link); kfree(interval); } usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable); spin_unlock(&pd->lock); } static int usnic_uiom_map_sorted_intervals(struct list_head *intervals, struct usnic_uiom_reg *uiomr) { int i, err; size_t size; struct usnic_uiom_chunk *chunk; struct usnic_uiom_interval_node *interval_node; dma_addr_t pa; dma_addr_t pa_start = 0; dma_addr_t pa_end = 0; long int va_start = -EINVAL; struct usnic_uiom_pd *pd = uiomr->pd; long int va = uiomr->va & PAGE_MASK; int flags = IOMMU_READ | IOMMU_CACHE; flags |= (uiomr->writable) ? 
IOMMU_WRITE : 0; chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk, list); list_for_each_entry(interval_node, intervals, link) { iter_chunk: for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) { pa = sg_phys(&chunk->page_list[i]); if ((va >> PAGE_SHIFT) < interval_node->start) continue; if ((va >> PAGE_SHIFT) == interval_node->start) { /* First page of the interval */ va_start = va; pa_start = pa; pa_end = pa; } WARN_ON(va_start == -EINVAL); if ((pa_end + PAGE_SIZE != pa) && (pa != pa_start)) { /* PAs are not contiguous */ size = pa_end - pa_start + PAGE_SIZE; usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x", va_start, &pa_start, size, flags); err = iommu_map(pd->domain, va_start, pa_start, size, flags, GFP_ATOMIC); if (err) { usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n", va_start, &pa_start, size, err); goto err_out; } va_start = va; pa_start = pa; pa_end = pa; } if ((va >> PAGE_SHIFT) == interval_node->last) { /* Last page of the interval */ size = pa - pa_start + PAGE_SIZE; usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n", va_start, &pa_start, size, flags); err = iommu_map(pd->domain, va_start, pa_start, size, flags, GFP_ATOMIC); if (err) { usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n", va_start, &pa_start, size, err); goto err_out; } break; } if (pa != pa_start) pa_end += PAGE_SIZE; } if (i == chunk->nents) { /* * Hit last entry of the chunk, * hence advance to next chunk */ chunk = list_first_entry(&chunk->list, struct usnic_uiom_chunk, list); goto iter_chunk; } } return 0; err_out: usnic_uiom_unmap_sorted_intervals(intervals, pd); return err; } struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, unsigned long addr, size_t size, int writable, int dmasync) { struct usnic_uiom_reg *uiomr; unsigned long va_base, vpn_start, vpn_last; unsigned long npages; int offset, err; LIST_HEAD(sorted_diff_intervals); /* * Intel IOMMU map throws an error if a translation entry is * changed from read to write. This module may not unmap * and then remap the entry after fixing the permission * b/c this open up a small windows where hw DMA may page fault * Hence, make all entries to be writable. */ writable = 1; va_base = addr & PAGE_MASK; offset = addr & ~PAGE_MASK; npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT; vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT; vpn_last = vpn_start + npages - 1; uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL); if (!uiomr) return ERR_PTR(-ENOMEM); uiomr->va = va_base; uiomr->offset = offset; uiomr->length = size; uiomr->writable = writable; uiomr->pd = pd; err = usnic_uiom_get_pages(addr, size, writable, dmasync, uiomr); if (err) { usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n", vpn_start, vpn_last, err); goto out_free_uiomr; } spin_lock(&pd->lock); err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last, (writable) ? IOMMU_WRITE : 0, IOMMU_WRITE, &pd->root, &sorted_diff_intervals); if (err) { usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n", vpn_start, vpn_last, err); goto out_put_pages; } err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr); if (err) { usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n", vpn_start, vpn_last, err); goto out_put_intervals; } err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last, (writable) ? 
IOMMU_WRITE : 0); if (err) { usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n", vpn_start, vpn_last, err); goto out_unmap_intervals; } usnic_uiom_put_interval_set(&sorted_diff_intervals); spin_unlock(&pd->lock); return uiomr; out_unmap_intervals: usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd); out_put_intervals: usnic_uiom_put_interval_set(&sorted_diff_intervals); out_put_pages: usnic_uiom_put_pages(&uiomr->chunk_list, 0); spin_unlock(&pd->lock); mmdrop(uiomr->owning_mm); out_free_uiomr: kfree(uiomr); return ERR_PTR(err); } static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr) { mmdrop(uiomr->owning_mm); kfree(uiomr); } static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr) { return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; } void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr) { __usnic_uiom_reg_release(uiomr->pd, uiomr, 1); atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm); __usnic_uiom_release_tail(uiomr); } struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev) { struct usnic_uiom_pd *pd; void *domain; pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (!pd) return ERR_PTR(-ENOMEM); pd->domain = domain = iommu_domain_alloc(dev->bus); if (!domain) { usnic_err("Failed to allocate IOMMU domain"); kfree(pd); return ERR_PTR(-ENOMEM); } iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); spin_lock_init(&pd->lock); INIT_LIST_HEAD(&pd->devs); return pd; } void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd) { iommu_domain_free(pd->domain); kfree(pd); } int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev) { struct usnic_uiom_dev *uiom_dev; int err; uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC); if (!uiom_dev) return -ENOMEM; uiom_dev->dev = dev; err = iommu_attach_device(pd->domain, dev); if (err) goto out_free_dev; if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) { usnic_err("IOMMU of %s does not support cache coherency\n", dev_name(dev)); err = -EINVAL; goto out_detach_device; } spin_lock(&pd->lock); list_add_tail(&uiom_dev->link, &pd->devs); pd->dev_cnt++; spin_unlock(&pd->lock); return 0; out_detach_device: iommu_detach_device(pd->domain, dev); out_free_dev: kfree(uiom_dev); return err; } void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev) { struct usnic_uiom_dev *uiom_dev; int found = 0; spin_lock(&pd->lock); list_for_each_entry(uiom_dev, &pd->devs, link) { if (uiom_dev->dev == dev) { found = 1; break; } } if (!found) { usnic_err("Unable to free dev %s - not found\n", dev_name(dev)); spin_unlock(&pd->lock); return; } list_del(&uiom_dev->link); pd->dev_cnt--; spin_unlock(&pd->lock); return iommu_detach_device(pd->domain, dev); } struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd) { struct usnic_uiom_dev *uiom_dev; struct device **devs; int i = 0; spin_lock(&pd->lock); devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC); if (!devs) { devs = ERR_PTR(-ENOMEM); goto out; } list_for_each_entry(uiom_dev, &pd->devs, link) { devs[i++] = uiom_dev->dev; } out: spin_unlock(&pd->lock); return devs; } void usnic_uiom_free_dev_list(struct device **devs) { kfree(devs); }
linux-master
drivers/infiniband/hw/usnic/usnic_uiom.c
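usnic_uiom_get_pages() and __usnic_uiom_reg_release() both rely on the same arithmetic: fold the sub-page offset into the size, round up to whole pages, and derive the first and last virtual page numbers. The standalone userspace program below works that arithmetic through one concrete case; the 4 KiB PAGE_SIZE and the sample addr/size values are assumptions chosen only for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL			/* assumes 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long addr = 0x7f0000001234UL;	/* arbitrary user VA */
	unsigned long size = 10000;		/* bytes to register */
	unsigned long offset, npages, vpn_start, vpn_last;

	/* same overflow guard as usnic_uiom_get_pages() */
	if (addr + size < addr || PAGE_ALIGN(addr + size) < addr + size)
		return 1;

	offset = addr & ~PAGE_MASK;			/* 0x234 */
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT; /* 3 pages */
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	printf("offset=%#lx npages=%lu vpn=[%#lx,%#lx]\n",
	       offset, npages, vpn_start, vpn_last);
	return 0;
}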
/* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/bitmap.h> #include <linux/file.h> #include <linux/slab.h> #include <net/inet_sock.h> #include "usnic_transport.h" #include "usnic_log.h" /* ROCE */ static unsigned long *roce_bitmap; static u16 roce_next_port = 1; #define ROCE_BITMAP_SZ ((1 << (8 /*CHAR_BIT*/ * sizeof(u16)))/8 /*CHAR BIT*/) static DEFINE_SPINLOCK(roce_bitmap_lock); const char *usnic_transport_to_str(enum usnic_transport_type type) { switch (type) { case USNIC_TRANSPORT_UNKNOWN: return "Unknown"; case USNIC_TRANSPORT_ROCE_CUSTOM: return "roce custom"; case USNIC_TRANSPORT_IPV4_UDP: return "IPv4 UDP"; case USNIC_TRANSPORT_MAX: return "Max?"; default: return "Not known"; } } int usnic_transport_sock_to_str(char *buf, int buf_sz, struct socket *sock) { int err; uint32_t addr; uint16_t port; int proto; memset(buf, 0, buf_sz); err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port); if (err) return 0; return scnprintf(buf, buf_sz, "Proto:%u Addr:%pI4h Port:%hu", proto, &addr, port); } /* * reserve a port number. if "0" specified, we will try to pick one * starting at roce_next_port. 
roce_next_port will take on the values * 1..4096 */ u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num) { if (type == USNIC_TRANSPORT_ROCE_CUSTOM) { spin_lock(&roce_bitmap_lock); if (!port_num) { port_num = bitmap_find_next_zero_area(roce_bitmap, ROCE_BITMAP_SZ, roce_next_port /* start */, 1 /* nr */, 0 /* align */); roce_next_port = (port_num & 4095) + 1; } else if (test_bit(port_num, roce_bitmap)) { usnic_err("Failed to allocate port for %s\n", usnic_transport_to_str(type)); spin_unlock(&roce_bitmap_lock); goto out_fail; } bitmap_set(roce_bitmap, port_num, 1); spin_unlock(&roce_bitmap_lock); } else { usnic_err("Failed to allocate port - transport %s unsupported\n", usnic_transport_to_str(type)); goto out_fail; } usnic_dbg("Allocating port %hu for %s\n", port_num, usnic_transport_to_str(type)); return port_num; out_fail: return 0; } void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num) { if (type == USNIC_TRANSPORT_ROCE_CUSTOM) { spin_lock(&roce_bitmap_lock); if (!port_num) { usnic_err("Unreserved invalid port num 0 for %s\n", usnic_transport_to_str(type)); goto out_roce_custom; } if (!test_bit(port_num, roce_bitmap)) { usnic_err("Unreserving invalid %hu for %s\n", port_num, usnic_transport_to_str(type)); goto out_roce_custom; } bitmap_clear(roce_bitmap, port_num, 1); usnic_dbg("Freeing port %hu for %s\n", port_num, usnic_transport_to_str(type)); out_roce_custom: spin_unlock(&roce_bitmap_lock); } else { usnic_err("Freeing invalid port %hu for %d\n", port_num, type); } } struct socket *usnic_transport_get_socket(int sock_fd) { struct socket *sock; int err; char buf[25]; /* sockfd_lookup will internally do a fget */ sock = sockfd_lookup(sock_fd, &err); if (!sock) { usnic_err("Unable to lookup socket for fd %d with err %d\n", sock_fd, err); return ERR_PTR(-ENOENT); } usnic_transport_sock_to_str(buf, sizeof(buf), sock); usnic_dbg("Get sock %s\n", buf); return sock; } void usnic_transport_put_socket(struct socket *sock) { char buf[100]; usnic_transport_sock_to_str(buf, sizeof(buf), sock); usnic_dbg("Put sock %s\n", buf); sockfd_put(sock); } int usnic_transport_sock_get_addr(struct socket *sock, int *proto, uint32_t *addr, uint16_t *port) { int err; struct sockaddr_in sock_addr; err = sock->ops->getname(sock, (struct sockaddr *)&sock_addr, 0); if (err < 0) return err; if (sock_addr.sin_family != AF_INET) return -EINVAL; if (proto) *proto = sock->sk->sk_protocol; if (port) *port = ntohs(((struct sockaddr_in *)&sock_addr)->sin_port); if (addr) *addr = ntohl(((struct sockaddr_in *) &sock_addr)->sin_addr.s_addr); return 0; } int usnic_transport_init(void) { roce_bitmap = kzalloc(ROCE_BITMAP_SZ, GFP_KERNEL); if (!roce_bitmap) return -ENOMEM; /* Do not ever allocate bit 0, hence set it here */ bitmap_set(roce_bitmap, 0, 1); return 0; } void usnic_transport_fini(void) { kfree(roce_bitmap); }
linux-master
drivers/infiniband/hw/usnic/usnic_transport.c
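usnic_transport_rsrv_port() and usnic_transport_unrsrv_port() implement a small bitmap allocator over the 16-bit ROCE port space, with bit 0 permanently reserved and a roving next-port hint. The self-contained userspace sketch below mirrors that bookkeeping without the spinlock; the helper names and the 4096 wrap of the hint echo the driver but are not the kernel bitmap API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PORTS 65536			/* 16-bit port space, one bit per port */

static uint8_t port_bitmap[NUM_PORTS / 8];
static uint16_t next_port = 1;		/* roving hint; port 0 is never handed out */

static bool port_test(unsigned int n)  { return port_bitmap[n / 8] & (1u << (n % 8)); }
static void port_set(unsigned int n)   { port_bitmap[n / 8] |= 1u << (n % 8); }
static void port_clear(unsigned int n) { port_bitmap[n / 8] &= ~(1u << (n % 8)); }

/* Reserve a port; 0 asks the allocator to pick one. Returns 0 on failure. */
static uint16_t rsrv_port(uint16_t want)
{
	if (!want) {
		unsigned int p;

		for (p = next_port; p < NUM_PORTS && port_test(p); p++)
			;
		if (p >= NUM_PORTS)
			return 0;
		want = (uint16_t)p;
		next_port = (want & 4095) + 1;	/* keep the hint in 1..4096 */
	} else if (port_test(want)) {
		return 0;			/* explicit request already taken */
	}
	port_set(want);
	return want;
}

static void unrsrv_port(uint16_t port)
{
	if (port && port_test(port))
		port_clear(port);
}

int main(void)
{
	uint16_t a, b;

	port_set(0);				/* never allocate bit 0, as in the driver */
	a = rsrv_port(0);
	b = rsrv_port(0);
	printf("picked %u then %u\n", a, b);	/* 1 then 2 */
	unrsrv_port(a);
	printf("after free: %u\n", rsrv_port(a)); /* 1 reserved again */
	return 0;
}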
/* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/errno.h> #include <linux/pci.h> #include "usnic_ib.h" #include "vnic_resource.h" #include "usnic_log.h" #include "usnic_vnic.h" struct usnic_vnic { struct vnic_dev *vdev; struct vnic_dev_bar bar[PCI_NUM_RESOURCES]; struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; spinlock_t res_lock; }; static enum vnic_res_type _to_vnic_res_type(enum usnic_vnic_res_type res_type) { #define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \ vnic_res_type, #define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \ vnic_res_type, static enum vnic_res_type usnic_vnic_type_2_vnic_type[] = { USNIC_VNIC_RES_TYPES}; #undef DEFINE_USNIC_VNIC_RES #undef DEFINE_USNIC_VNIC_RES_AT if (res_type >= USNIC_VNIC_RES_TYPE_MAX) return RES_TYPE_MAX; return usnic_vnic_type_2_vnic_type[res_type]; } const char *usnic_vnic_res_type_to_str(enum usnic_vnic_res_type res_type) { #define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \ desc, #define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \ desc, static const char * const usnic_vnic_res_type_desc[] = { USNIC_VNIC_RES_TYPES}; #undef DEFINE_USNIC_VNIC_RES #undef DEFINE_USNIC_VNIC_RES_AT if (res_type >= USNIC_VNIC_RES_TYPE_MAX) return "unknown"; return usnic_vnic_res_type_desc[res_type]; } const char *usnic_vnic_pci_name(struct usnic_vnic *vnic) { return pci_name(usnic_vnic_get_pdev(vnic)); } int usnic_vnic_dump(struct usnic_vnic *vnic, char *buf, int buf_sz, void *hdr_obj, int (*printtitle)(void *, char*, int), int (*printcols)(char *, int), int (*printrow)(void *, char *, int)) { struct usnic_vnic_res_chunk *chunk; struct usnic_vnic_res *res; struct vnic_dev_bar *bar0; int i, j, offset; offset = 0; bar0 = usnic_vnic_get_bar(vnic, 0); offset += scnprintf(buf + offset, buf_sz - offset, "VF:%hu BAR0 bus_addr=%pa vaddr=0x%p size=%ld ", usnic_vnic_get_index(vnic), &bar0->bus_addr, bar0->vaddr, bar0->len); if (printtitle) offset += printtitle(hdr_obj, buf + offset, buf_sz - offset); offset += scnprintf(buf + offset, buf_sz - offset, "\n"); offset += scnprintf(buf + offset, buf_sz - offset, "|RES\t|CTRL_PIN\t\t|IN_USE\t"); if (printcols) offset += printcols(buf + offset, buf_sz - 
offset); offset += scnprintf(buf + offset, buf_sz - offset, "\n"); spin_lock(&vnic->res_lock); for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { chunk = &vnic->chunks[i]; for (j = 0; j < chunk->cnt; j++) { res = chunk->res[j]; offset += scnprintf(buf + offset, buf_sz - offset, "|%s[%u]\t|0x%p\t|%u\t", usnic_vnic_res_type_to_str(res->type), res->vnic_idx, res->ctrl, !!res->owner); if (printrow) { offset += printrow(res->owner, buf + offset, buf_sz - offset); } offset += scnprintf(buf + offset, buf_sz - offset, "\n"); } } spin_unlock(&vnic->res_lock); return offset; } void usnic_vnic_res_spec_update(struct usnic_vnic_res_spec *spec, enum usnic_vnic_res_type trgt_type, u16 cnt) { int i; for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) { if (spec->resources[i].type == trgt_type) { spec->resources[i].cnt = cnt; return; } } WARN_ON(1); } int usnic_vnic_res_spec_satisfied(const struct usnic_vnic_res_spec *min_spec, struct usnic_vnic_res_spec *res_spec) { int found, i, j; for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) { found = 0; for (j = 0; j < USNIC_VNIC_RES_TYPE_MAX; j++) { if (res_spec->resources[i].type != min_spec->resources[i].type) continue; found = 1; if (min_spec->resources[i].cnt > res_spec->resources[i].cnt) return -EINVAL; break; } if (!found) return -EINVAL; } return 0; } int usnic_vnic_spec_dump(char *buf, int buf_sz, struct usnic_vnic_res_spec *res_spec) { enum usnic_vnic_res_type res_type; int res_cnt; int i; int offset = 0; for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) { res_type = res_spec->resources[i].type; res_cnt = res_spec->resources[i].cnt; offset += scnprintf(buf + offset, buf_sz - offset, "Res: %s Cnt: %d ", usnic_vnic_res_type_to_str(res_type), res_cnt); } return offset; } int usnic_vnic_check_room(struct usnic_vnic *vnic, struct usnic_vnic_res_spec *res_spec) { int i; enum usnic_vnic_res_type res_type; int res_cnt; for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) { res_type = res_spec->resources[i].type; res_cnt = res_spec->resources[i].cnt; if (res_type == USNIC_VNIC_RES_TYPE_EOL) break; if (res_cnt > usnic_vnic_res_free_cnt(vnic, res_type)) return -EBUSY; } return 0; } int usnic_vnic_res_cnt(struct usnic_vnic *vnic, enum usnic_vnic_res_type type) { return vnic->chunks[type].cnt; } int usnic_vnic_res_free_cnt(struct usnic_vnic *vnic, enum usnic_vnic_res_type type) { return vnic->chunks[type].free_cnt; } struct usnic_vnic_res_chunk * usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type, int cnt, void *owner) { struct usnic_vnic_res_chunk *src, *ret; struct usnic_vnic_res *res; int i; if (usnic_vnic_res_free_cnt(vnic, type) < cnt || cnt < 0 || !owner) return ERR_PTR(-EINVAL); ret = kzalloc(sizeof(*ret), GFP_ATOMIC); if (!ret) return ERR_PTR(-ENOMEM); if (cnt > 0) { ret->res = kcalloc(cnt, sizeof(*(ret->res)), GFP_ATOMIC); if (!ret->res) { kfree(ret); return ERR_PTR(-ENOMEM); } spin_lock(&vnic->res_lock); src = &vnic->chunks[type]; for (i = 0; i < src->cnt && ret->cnt < cnt; i++) { res = src->res[i]; if (!res->owner) { src->free_cnt--; res->owner = owner; ret->res[ret->cnt++] = res; } } spin_unlock(&vnic->res_lock); } ret->type = type; ret->vnic = vnic; WARN_ON(ret->cnt != cnt); return ret; } void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk) { struct usnic_vnic_res *res; int i; struct usnic_vnic *vnic = chunk->vnic; if (chunk->cnt > 0) { spin_lock(&vnic->res_lock); while ((i = --chunk->cnt) >= 0) { res = chunk->res[i]; chunk->res[i] = NULL; res->owner = NULL; vnic->chunks[res->type].free_cnt++; } spin_unlock(&vnic->res_lock); } 
kfree(chunk->res); kfree(chunk); } u16 usnic_vnic_get_index(struct usnic_vnic *vnic) { return usnic_vnic_get_pdev(vnic)->devfn - 1; } static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic, enum usnic_vnic_res_type type, struct usnic_vnic_res_chunk *chunk) { int cnt, err, i; struct usnic_vnic_res *res; cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type)); if (cnt < 1) { usnic_err("Wrong res count with cnt %d\n", cnt); return -EINVAL; } chunk->cnt = chunk->free_cnt = cnt; chunk->res = kcalloc(cnt, sizeof(*(chunk->res)), GFP_KERNEL); if (!chunk->res) return -ENOMEM; for (i = 0; i < cnt; i++) { res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) { err = -ENOMEM; goto fail; } res->type = type; res->vnic_idx = i; res->vnic = vnic; res->ctrl = vnic_dev_get_res(vnic->vdev, _to_vnic_res_type(type), i); chunk->res[i] = res; } chunk->vnic = vnic; return 0; fail: for (i--; i >= 0; i--) kfree(chunk->res[i]); kfree(chunk->res); return err; } static void usnic_vnic_free_res_chunk(struct usnic_vnic_res_chunk *chunk) { int i; for (i = 0; i < chunk->cnt; i++) kfree(chunk->res[i]); kfree(chunk->res); } static int usnic_vnic_discover_resources(struct pci_dev *pdev, struct usnic_vnic *vnic) { enum usnic_vnic_res_type res_type; int i; int err = 0; for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) { if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) continue; vnic->bar[i].len = pci_resource_len(pdev, i); vnic->bar[i].vaddr = pci_iomap(pdev, i, vnic->bar[i].len); if (!vnic->bar[i].vaddr) { usnic_err("Cannot memory-map BAR %d, aborting\n", i); err = -ENODEV; goto out_clean_bar; } vnic->bar[i].bus_addr = pci_resource_start(pdev, i); } vnic->vdev = vnic_dev_register(NULL, pdev, pdev, vnic->bar, ARRAY_SIZE(vnic->bar)); if (!vnic->vdev) { usnic_err("Failed to register device %s\n", pci_name(pdev)); err = -EINVAL; goto out_clean_bar; } for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1; res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) { err = usnic_vnic_alloc_res_chunk(vnic, res_type, &vnic->chunks[res_type]); if (err) goto out_clean_chunks; } return 0; out_clean_chunks: for (res_type--; res_type > USNIC_VNIC_RES_TYPE_EOL; res_type--) usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); vnic_dev_unregister(vnic->vdev); out_clean_bar: for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) { if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) continue; if (!vnic->bar[i].vaddr) break; iounmap(vnic->bar[i].vaddr); } return err; } struct pci_dev *usnic_vnic_get_pdev(struct usnic_vnic *vnic) { return vnic_dev_get_pdev(vnic->vdev); } struct vnic_dev_bar *usnic_vnic_get_bar(struct usnic_vnic *vnic, int bar_num) { return (bar_num < ARRAY_SIZE(vnic->bar)) ? 
&vnic->bar[bar_num] : NULL; } static void usnic_vnic_release_resources(struct usnic_vnic *vnic) { int i; struct pci_dev *pdev; enum usnic_vnic_res_type res_type; pdev = usnic_vnic_get_pdev(vnic); for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1; res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); vnic_dev_unregister(vnic->vdev); for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) { if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) continue; iounmap(vnic->bar[i].vaddr); } } struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev) { struct usnic_vnic *vnic; int err = 0; if (!pci_is_enabled(pdev)) { usnic_err("PCI dev %s is disabled\n", pci_name(pdev)); return ERR_PTR(-EINVAL); } vnic = kzalloc(sizeof(*vnic), GFP_KERNEL); if (!vnic) return ERR_PTR(-ENOMEM); spin_lock_init(&vnic->res_lock); err = usnic_vnic_discover_resources(pdev, vnic); if (err) { usnic_err("Failed to discover %s resources with err %d\n", pci_name(pdev), err); goto out_free_vnic; } usnic_dbg("Allocated vnic for %s\n", usnic_vnic_pci_name(vnic)); return vnic; out_free_vnic: kfree(vnic); return ERR_PTR(err); } void usnic_vnic_free(struct usnic_vnic *vnic) { usnic_vnic_release_resources(vnic); kfree(vnic); }
linux-master
drivers/infiniband/hw/usnic/usnic_vnic.c
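usnic_vnic_get_resources() and usnic_vnic_put_resources() are, at their core, ownership bookkeeping over a fixed array of resources: claiming marks an owner and decrements free_cnt, releasing clears the owner and increments it. The userspace sketch below mirrors only that bookkeeping, without the spinlock or kernel allocation paths; all names are illustrative.

#include <stdio.h>

struct res {
	int idx;
	void *owner;		/* NULL means free */
};

struct chunk {
	struct res *res;
	int cnt;
	int free_cnt;
};

/* Claim up to cnt free resources for `owner` (shape of usnic_vnic_get_resources) */
static int chunk_get(struct chunk *c, int cnt, void *owner, struct res **out)
{
	int i, got = 0;

	if (cnt < 0 || cnt > c->free_cnt)
		return -1;
	for (i = 0; i < c->cnt && got < cnt; i++) {
		if (!c->res[i].owner) {
			c->res[i].owner = owner;
			c->free_cnt--;
			out[got++] = &c->res[i];
		}
	}
	return got;
}

static void chunk_put(struct chunk *c, struct res **taken, int cnt)
{
	while (cnt-- > 0) {
		taken[cnt]->owner = NULL;
		c->free_cnt++;
	}
}

int main(void)
{
	struct res pool[4] = { {0, NULL}, {1, NULL}, {2, NULL}, {3, NULL} };
	struct chunk c = { pool, 4, 4 };
	struct res *mine[2];
	int me, got;

	got = chunk_get(&c, 2, &me, mine);
	printf("got %d, free now %d\n", got, c.free_cnt);	/* got 2, free now 2 */
	chunk_put(&c, mine, got);
	printf("free after put %d\n", c.free_cnt);		/* 4 */
	return 0;
}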
/* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/netdevice.h> #include <linux/pci.h> #include "enic_api.h" #include "usnic_common_pkt_hdr.h" #include "usnic_fwd.h" #include "usnic_log.h" static int usnic_fwd_devcmd_locked(struct usnic_fwd_dev *ufdev, int vnic_idx, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1) { int status; struct net_device *netdev = ufdev->netdev; lockdep_assert_held(&ufdev->lock); status = enic_api_devcmd_proxy_by_index(netdev, vnic_idx, cmd, a0, a1, 1000); if (status) { if (status == ERR_EINVAL && cmd == CMD_DEL_FILTER) { usnic_dbg("Dev %s vnic idx %u cmd %u already deleted", ufdev->name, vnic_idx, cmd); } else { usnic_err("Dev %s vnic idx %u cmd %u failed with status %d\n", ufdev->name, vnic_idx, cmd, status); } } else { usnic_dbg("Dev %s vnic idx %u cmd %u success", ufdev->name, vnic_idx, cmd); } return status; } static int usnic_fwd_devcmd(struct usnic_fwd_dev *ufdev, int vnic_idx, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1) { int status; spin_lock(&ufdev->lock); status = usnic_fwd_devcmd_locked(ufdev, vnic_idx, cmd, a0, a1); spin_unlock(&ufdev->lock); return status; } struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev) { struct usnic_fwd_dev *ufdev; ufdev = kzalloc(sizeof(*ufdev), GFP_KERNEL); if (!ufdev) return NULL; ufdev->pdev = pdev; ufdev->netdev = pci_get_drvdata(pdev); spin_lock_init(&ufdev->lock); BUILD_BUG_ON(sizeof(ufdev->name) != sizeof(ufdev->netdev->name)); strcpy(ufdev->name, ufdev->netdev->name); return ufdev; } void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev) { kfree(ufdev); } void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN]) { spin_lock(&ufdev->lock); memcpy(&ufdev->mac, mac, sizeof(ufdev->mac)); spin_unlock(&ufdev->lock); } void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr) { spin_lock(&ufdev->lock); if (!ufdev->inaddr) ufdev->inaddr = inaddr; spin_unlock(&ufdev->lock); } void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev) { spin_lock(&ufdev->lock); ufdev->inaddr = 0; spin_unlock(&ufdev->lock); } void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev) { spin_lock(&ufdev->lock); ufdev->link_up = 1; spin_unlock(&ufdev->lock); } void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev) { spin_lock(&ufdev->lock); 
ufdev->link_up = 0; spin_unlock(&ufdev->lock); } void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu) { spin_lock(&ufdev->lock); ufdev->mtu = mtu; spin_unlock(&ufdev->lock); } static int usnic_fwd_dev_ready_locked(struct usnic_fwd_dev *ufdev) { lockdep_assert_held(&ufdev->lock); if (!ufdev->link_up) return -EPERM; return 0; } static int validate_filter_locked(struct usnic_fwd_dev *ufdev, struct filter *filter) { lockdep_assert_held(&ufdev->lock); if (filter->type == FILTER_IPV4_5TUPLE) { if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_AD)) return -EACCES; if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_PT)) return -EBUSY; else if (ufdev->inaddr == 0) return -EINVAL; else if (filter->u.ipv4.dst_port == 0) return -ERANGE; else if (ntohl(ufdev->inaddr) != filter->u.ipv4.dst_addr) return -EFAULT; else return 0; } return 0; } static void fill_tlv(struct filter_tlv *tlv, struct filter *filter, struct filter_action *action) { tlv->type = CLSF_TLV_FILTER; tlv->length = sizeof(struct filter); *((struct filter *)&tlv->val) = *filter; tlv = (struct filter_tlv *)((char *)tlv + sizeof(struct filter_tlv) + sizeof(struct filter)); tlv->type = CLSF_TLV_ACTION; tlv->length = sizeof(struct filter_action); *((struct filter_action *)&tlv->val) = *action; } struct usnic_fwd_flow* usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter, struct usnic_filter_action *uaction) { struct filter_tlv *tlv; struct pci_dev *pdev; struct usnic_fwd_flow *flow; uint64_t a0, a1; uint64_t tlv_size; dma_addr_t tlv_pa; int status; pdev = ufdev->pdev; tlv_size = (2*sizeof(struct filter_tlv) + sizeof(struct filter) + sizeof(struct filter_action)); flow = kzalloc(sizeof(*flow), GFP_ATOMIC); if (!flow) return ERR_PTR(-ENOMEM); tlv = dma_alloc_coherent(&pdev->dev, tlv_size, &tlv_pa, GFP_ATOMIC); if (!tlv) { usnic_err("Failed to allocate memory\n"); status = -ENOMEM; goto out_free_flow; } fill_tlv(tlv, filter, &uaction->action); spin_lock(&ufdev->lock); status = usnic_fwd_dev_ready_locked(ufdev); if (status) { usnic_err("Forwarding dev %s not ready with status %d\n", ufdev->name, status); goto out_free_tlv; } status = validate_filter_locked(ufdev, filter); if (status) { usnic_err("Failed to validate filter with status %d\n", status); goto out_free_tlv; } /* Issue Devcmd */ a0 = tlv_pa; a1 = tlv_size; status = usnic_fwd_devcmd_locked(ufdev, uaction->vnic_idx, CMD_ADD_FILTER, &a0, &a1); if (status) { usnic_err("VF %s Filter add failed with status:%d", ufdev->name, status); status = -EFAULT; goto out_free_tlv; } else { usnic_dbg("VF %s FILTER ID:%llu", ufdev->name, a0); } flow->flow_id = (uint32_t) a0; flow->vnic_idx = uaction->vnic_idx; flow->ufdev = ufdev; out_free_tlv: spin_unlock(&ufdev->lock); dma_free_coherent(&pdev->dev, tlv_size, tlv, tlv_pa); if (!status) return flow; out_free_flow: kfree(flow); return ERR_PTR(status); } int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow) { int status; u64 a0, a1; a0 = flow->flow_id; status = usnic_fwd_devcmd(flow->ufdev, flow->vnic_idx, CMD_DEL_FILTER, &a0, &a1); if (status) { if (status == ERR_EINVAL) { usnic_dbg("Filter %u already deleted for VF Idx %u pf: %s status: %d", flow->flow_id, flow->vnic_idx, flow->ufdev->name, status); } else { usnic_err("PF %s VF Idx %u Filter: %u FILTER DELETE failed with status %d", flow->ufdev->name, flow->vnic_idx, flow->flow_id, status); } status = 0; /* * Log the error and fake success to the caller because if * a flow fails to be deleted in the firmware, it is an * unrecoverable error. 
*/ } else { usnic_dbg("PF %s VF Idx %u Filter: %u FILTER DELETED", flow->ufdev->name, flow->vnic_idx, flow->flow_id); } kfree(flow); return status; } int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx) { int status; struct net_device *pf_netdev; u64 a0, a1; pf_netdev = ufdev->netdev; a0 = qp_idx; a1 = CMD_QP_RQWQ; status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE, &a0, &a1); if (status) { usnic_err("PF %s VNIC Index %u RQ Index: %u ENABLE Failed with status %d", netdev_name(pf_netdev), vnic_idx, qp_idx, status); } else { usnic_dbg("PF %s VNIC Index %u RQ Index: %u ENABLED", netdev_name(pf_netdev), vnic_idx, qp_idx); } return status; } int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx) { int status; u64 a0, a1; struct net_device *pf_netdev; pf_netdev = ufdev->netdev; a0 = qp_idx; a1 = CMD_QP_RQWQ; status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE, &a0, &a1); if (status) { usnic_err("PF %s VNIC Index %u RQ Index: %u DISABLE Failed with status %d", netdev_name(pf_netdev), vnic_idx, qp_idx, status); } else { usnic_dbg("PF %s VNIC Index %u RQ Index: %u DISABLED", netdev_name(pf_netdev), vnic_idx, qp_idx); } return status; }
linux-master
drivers/infiniband/hw/usnic/usnic_fwd.c
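The usnic_fwd.c record above hands each new flow to firmware as two type-length-value blocks packed back to back in one DMA buffer: fill_tlv() writes the filter TLV, then the action TLV immediately after it, and the bus address plus total size go out via the devcmd. The sketch below shows only that packing pattern, as a hedged userspace illustration; every struct, enum value, and field in it (demo_tlv, DEMO_TLV_FILTER, and so on) is an invented stand-in, not the driver's real definitions.

/* Self-contained sketch of back-to-back TLV packing, in the spirit of
 * fill_tlv() above. All names here are illustrative stand-ins.
 * Build with: cc -o tlv_demo tlv_demo.c
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_tlv {
	uint32_t type;
	uint32_t length;
	uint8_t  val[];			/* payload follows the header */
};

enum { DEMO_TLV_FILTER = 1, DEMO_TLV_ACTION = 2 };

struct demo_filter { uint32_t dst_addr; uint16_t dst_port; };
struct demo_action { uint32_t rq_idx; };

/* Append one TLV at 'cursor' and return where the next TLV starts. */
static uint8_t *put_tlv(uint8_t *cursor, uint32_t type,
			const void *payload, uint32_t len)
{
	struct demo_tlv *tlv = (struct demo_tlv *)cursor;

	tlv->type = type;
	tlv->length = len;
	memcpy(tlv->val, payload, len);
	return cursor + sizeof(*tlv) + len;
}

int main(void)
{
	struct demo_filter filter = { .dst_addr = 0x0a000001, .dst_port = 4791 };
	struct demo_action action = { .rq_idx = 7 };
	size_t buf_size = 2 * sizeof(struct demo_tlv) +
			  sizeof(filter) + sizeof(action);
	uint8_t *buf = calloc(1, buf_size);
	uint8_t *cur = buf;

	if (!buf)
		return 1;

	/* Filter TLV first, action TLV immediately after it, matching the
	 * layout fill_tlv() writes into the DMA buffer. */
	cur = put_tlv(cur, DEMO_TLV_FILTER, &filter, sizeof(filter));
	cur = put_tlv(cur, DEMO_TLV_ACTION, &action, sizeof(action));

	printf("packed %zu of %zu bytes\n", (size_t)(cur - buf), buf_size);
	free(buf);
	return 0;
}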
/* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/errno.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> #include <rdma/uverbs_ioctl.h> #include "usnic_abi.h" #include "usnic_ib.h" #include "usnic_common_util.h" #include "usnic_ib_qp_grp.h" #include "usnic_ib_verbs.h" #include "usnic_fwd.h" #include "usnic_log.h" #include "usnic_uiom.h" #include "usnic_transport.h" #define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = { { /*USNIC_TRANSPORT_UNKNOWN*/ .resources = { {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,}, }, }, { /*USNIC_TRANSPORT_ROCE_CUSTOM*/ .resources = { {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,}, {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,}, {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,}, {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,}, }, }, { /*USNIC_TRANSPORT_IPV4_UDP*/ .resources = { {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,}, {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,}, {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,}, {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,}, }, }, }; static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver) { *fw_ver = *((u64 *)fw_ver_str); } static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp, struct ib_udata *udata) { struct usnic_ib_dev *us_ibdev; struct usnic_ib_create_qp_resp resp; struct pci_dev *pdev; struct vnic_dev_bar *bar; struct usnic_vnic_res_chunk *chunk; struct usnic_ib_qp_grp_flow *default_flow; int i, err; memset(&resp, 0, sizeof(resp)); us_ibdev = qp_grp->vf->pf; pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic); if (!pdev) { usnic_err("Failed to get pdev of qp_grp %d\n", qp_grp->grp_id); return -EFAULT; } bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0); if (!bar) { usnic_err("Failed to get bar0 of qp_grp %d vf %s", qp_grp->grp_id, pci_name(pdev)); return -EFAULT; } resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic); resp.bar_bus_addr = bar->bus_addr; resp.bar_len = bar->len; chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ); if (IS_ERR(chunk)) { usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ), qp_grp->grp_id, PTR_ERR(chunk)); 
return PTR_ERR(chunk); } WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ); resp.rq_cnt = chunk->cnt; for (i = 0; i < chunk->cnt; i++) resp.rq_idx[i] = chunk->res[i]->vnic_idx; chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ); if (IS_ERR(chunk)) { usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ), qp_grp->grp_id, PTR_ERR(chunk)); return PTR_ERR(chunk); } WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ); resp.wq_cnt = chunk->cnt; for (i = 0; i < chunk->cnt; i++) resp.wq_idx[i] = chunk->res[i]->vnic_idx; chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ); if (IS_ERR(chunk)) { usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ), qp_grp->grp_id, PTR_ERR(chunk)); return PTR_ERR(chunk); } WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ); resp.cq_cnt = chunk->cnt; for (i = 0; i < chunk->cnt; i++) resp.cq_idx[i] = chunk->res[i]->vnic_idx; default_flow = list_first_entry(&qp_grp->flows_lst, struct usnic_ib_qp_grp_flow, link); resp.transport = default_flow->trans_type; err = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (err) { usnic_err("Failed to copy udata for %s", dev_name(&us_ibdev->ib_dev.dev)); return err; } return 0; } static int find_free_vf_and_create_qp_grp(struct ib_qp *qp, struct usnic_transport_spec *trans_spec, struct usnic_vnic_res_spec *res_spec) { struct usnic_ib_dev *us_ibdev = to_usdev(qp->device); struct usnic_ib_pd *pd = to_upd(qp->pd); struct usnic_ib_vf *vf; struct usnic_vnic *vnic; struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(qp); struct device *dev, **dev_list; int i, ret; BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock)); if (list_empty(&us_ibdev->vf_dev_list)) { usnic_info("No vfs to allocate\n"); return -ENOMEM; } if (usnic_ib_share_vf) { /* Try to find resouces on a used vf which is in pd */ dev_list = usnic_uiom_get_dev_list(pd->umem_pd); if (IS_ERR(dev_list)) return PTR_ERR(dev_list); for (i = 0; dev_list[i]; i++) { dev = dev_list[i]; vf = dev_get_drvdata(dev); mutex_lock(&vf->lock); vnic = vf->vnic; if (!usnic_vnic_check_room(vnic, res_spec)) { usnic_dbg("Found used vnic %s from %s\n", dev_name(&us_ibdev->ib_dev.dev), pci_name(usnic_vnic_get_pdev( vnic))); ret = usnic_ib_qp_grp_create(qp_grp, us_ibdev->ufdev, vf, pd, res_spec, trans_spec); mutex_unlock(&vf->lock); goto qp_grp_check; } mutex_unlock(&vf->lock); } usnic_uiom_free_dev_list(dev_list); dev_list = NULL; } /* Try to find resources on an unused vf */ list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) { mutex_lock(&vf->lock); vnic = vf->vnic; if (vf->qp_grp_ref_cnt == 0 && usnic_vnic_check_room(vnic, res_spec) == 0) { ret = usnic_ib_qp_grp_create(qp_grp, us_ibdev->ufdev, vf, pd, res_spec, trans_spec); mutex_unlock(&vf->lock); goto qp_grp_check; } mutex_unlock(&vf->lock); } usnic_info("No free qp grp found on %s\n", dev_name(&us_ibdev->ib_dev.dev)); return -ENOMEM; qp_grp_check: if (ret) { usnic_err("Failed to allocate qp_grp\n"); if (usnic_ib_share_vf) usnic_uiom_free_dev_list(dev_list); } return ret; } static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) { struct usnic_ib_vf *vf = qp_grp->vf; WARN_ON(qp_grp->state != IB_QPS_RESET); mutex_lock(&vf->lock); usnic_ib_qp_grp_destroy(qp_grp); mutex_unlock(&vf->lock); } static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd) { if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN || cmd.spec.trans_type >= USNIC_TRANSPORT_MAX) return -EINVAL; return 0; } /* Start of ib callback functions */ 
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device, u32 port_num) { return IB_LINK_LAYER_ETHERNET; } int usnic_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); union ib_gid gid; struct ethtool_drvinfo info; int qp_per_vf; usnic_dbg("\n"); if (uhw->inlen || uhw->outlen) return -EINVAL; mutex_lock(&us_ibdev->usdev_lock); us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); memset(props, 0, sizeof(*props)); usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr, &gid.raw[0]); memcpy(&props->sys_image_guid, &gid.global.interface_id, sizeof(gid.global.interface_id)); usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver); props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE; props->page_size_cap = USNIC_UIOM_PAGE_SIZE; props->vendor_id = PCI_VENDOR_ID_CISCO; props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC; props->hw_ver = us_ibdev->pdev->subsystem_device; qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ], us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]); props->max_qp = qp_per_vf * kref_read(&us_ibdev->vf_cnt); props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID; props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK; props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] * kref_read(&us_ibdev->vf_cnt); props->max_pd = USNIC_UIOM_MAX_PD_CNT; props->max_mr = USNIC_UIOM_MAX_MR_CNT; props->local_ca_ack_delay = 0; props->max_pkeys = 0; props->atomic_cap = IB_ATOMIC_NONE; props->masked_atomic_cap = props->atomic_cap; props->max_qp_rd_atom = 0; props->max_qp_init_rd_atom = 0; props->max_res_rd_atom = 0; props->max_srq = 0; props->max_srq_wr = 0; props->max_srq_sge = 0; props->max_fast_reg_page_list_len = 0; props->max_mcast_grp = 0; props->max_mcast_qp_attach = 0; props->max_total_mcast_qp_attach = 0; /* Owned by Userspace * max_qp_wr, max_sge, max_sge_rd, max_cqe */ mutex_unlock(&us_ibdev->usdev_lock); return 0; } int usnic_ib_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); usnic_dbg("\n"); if (ib_get_eth_speed(ibdev, port, &props->active_speed, &props->active_width)) return -EINVAL; /* * usdev_lock is acquired after (and not before) ib_get_eth_speed call * because acquiring rtnl_lock in ib_get_eth_speed, while holding * usdev_lock could lead to a deadlock. 
*/ mutex_lock(&us_ibdev->usdev_lock); /* props being zeroed by the caller, avoid zeroing it here */ props->lid = 0; props->lmc = 1; props->sm_lid = 0; props->sm_sl = 0; if (!us_ibdev->ufdev->link_up) { props->state = IB_PORT_DOWN; props->phys_state = IB_PORT_PHYS_STATE_DISABLED; } else if (!us_ibdev->ufdev->inaddr) { props->state = IB_PORT_INIT; props->phys_state = IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING; } else { props->state = IB_PORT_ACTIVE; props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } props->port_cap_flags = 0; props->gid_tbl_len = 1; props->bad_pkey_cntr = 0; props->qkey_viol_cntr = 0; props->max_mtu = IB_MTU_4096; props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu); /* Userspace will adjust for hdrs */ props->max_msg_sz = us_ibdev->ufdev->mtu; props->max_vl_num = 1; mutex_unlock(&us_ibdev->usdev_lock); return 0; } int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct usnic_ib_qp_grp *qp_grp; struct usnic_ib_vf *vf; int err; usnic_dbg("\n"); memset(qp_attr, 0, sizeof(*qp_attr)); memset(qp_init_attr, 0, sizeof(*qp_init_attr)); qp_grp = to_uqp_grp(qp); vf = qp_grp->vf; mutex_lock(&vf->pf->usdev_lock); usnic_dbg("\n"); qp_attr->qp_state = qp_grp->state; qp_attr->cur_qp_state = qp_grp->state; switch (qp_grp->ibqp.qp_type) { case IB_QPT_UD: qp_attr->qkey = 0; break; default: usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type); err = -EINVAL; goto err_out; } mutex_unlock(&vf->pf->usdev_lock); return 0; err_out: mutex_unlock(&vf->pf->usdev_lock); return err; } int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid) { struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); usnic_dbg("\n"); if (index > 1) return -EINVAL; mutex_lock(&us_ibdev->usdev_lock); memset(&(gid->raw[0]), 0, sizeof(gid->raw)); usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr, &gid->raw[0]); mutex_unlock(&us_ibdev->usdev_lock); return 0; } int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct usnic_ib_pd *pd = to_upd(ibpd); pd->umem_pd = usnic_uiom_alloc_pd(ibpd->device->dev.parent); if (IS_ERR(pd->umem_pd)) return PTR_ERR(pd->umem_pd); return 0; } int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd); return 0; } int usnic_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { int err; struct usnic_ib_dev *us_ibdev; struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(ibqp); struct usnic_ib_ucontext *ucontext = rdma_udata_to_drv_context( udata, struct usnic_ib_ucontext, ibucontext); int cq_cnt; struct usnic_vnic_res_spec res_spec; struct usnic_ib_create_qp_cmd cmd; struct usnic_transport_spec trans_spec; usnic_dbg("\n"); us_ibdev = to_usdev(ibqp->device); if (init_attr->create_flags) return -EOPNOTSUPP; err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); if (err) { usnic_err("%s: cannot copy udata for create_qp\n", dev_name(&us_ibdev->ib_dev.dev)); return -EINVAL; } err = create_qp_validate_user_data(cmd); if (err) { usnic_err("%s: Failed to validate user data\n", dev_name(&us_ibdev->ib_dev.dev)); return -EINVAL; } if (init_attr->qp_type != IB_QPT_UD) { usnic_err("%s asked to make a non-UD QP: %d\n", dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type); return -EOPNOTSUPP; } trans_spec = cmd.spec; mutex_lock(&us_ibdev->usdev_lock); cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 
1 : 2; res_spec = min_transport_spec[trans_spec.trans_type]; usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt); err = find_free_vf_and_create_qp_grp(ibqp, &trans_spec, &res_spec); if (err) goto out_release_mutex; err = usnic_ib_fill_create_qp_resp(qp_grp, udata); if (err) { err = -EBUSY; goto out_release_qp_grp; } qp_grp->ctx = ucontext; list_add_tail(&qp_grp->link, &ucontext->qp_grp_list); usnic_ib_log_vf(qp_grp->vf); mutex_unlock(&us_ibdev->usdev_lock); return 0; out_release_qp_grp: qp_grp_destroy(qp_grp); out_release_mutex: mutex_unlock(&us_ibdev->usdev_lock); return err; } int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) { struct usnic_ib_qp_grp *qp_grp; struct usnic_ib_vf *vf; usnic_dbg("\n"); qp_grp = to_uqp_grp(qp); vf = qp_grp->vf; mutex_lock(&vf->pf->usdev_lock); if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) { usnic_err("Failed to move qp grp %u to reset\n", qp_grp->grp_id); } list_del(&qp_grp->link); qp_grp_destroy(qp_grp); mutex_unlock(&vf->pf->usdev_lock); return 0; } int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct usnic_ib_qp_grp *qp_grp; int status; usnic_dbg("\n"); if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; qp_grp = to_uqp_grp(ibqp); mutex_lock(&qp_grp->vf->pf->usdev_lock); if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) { /* usnic devices only have one port */ status = -EINVAL; goto out_unlock; } if (attr_mask & IB_QP_STATE) { status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL); } else { usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask); status = -EINVAL; } out_unlock: mutex_unlock(&qp_grp->vf->pf->usdev_lock); return status; } int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { if (attr->flags) return -EOPNOTSUPP; return 0; } int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { return 0; } struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata) { struct usnic_ib_mr *mr; int err; usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start, virt_addr, length); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, access_flags, 0); if (IS_ERR_OR_NULL(mr->umem)) { err = mr->umem ? 
PTR_ERR(mr->umem) : -EFAULT; goto err_free; } mr->ibmr.lkey = mr->ibmr.rkey = 0; return &mr->ibmr; err_free: kfree(mr); return ERR_PTR(err); } int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct usnic_ib_mr *mr = to_umr(ibmr); usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length); usnic_uiom_reg_release(mr->umem); kfree(mr); return 0; } int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { struct ib_device *ibdev = uctx->device; struct usnic_ib_ucontext *context = to_ucontext(uctx); struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); usnic_dbg("\n"); INIT_LIST_HEAD(&context->qp_grp_list); mutex_lock(&us_ibdev->usdev_lock); list_add_tail(&context->link, &us_ibdev->ctx_list); mutex_unlock(&us_ibdev->usdev_lock); return 0; } void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct usnic_ib_ucontext *context = to_uucontext(ibcontext); struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device); usnic_dbg("\n"); mutex_lock(&us_ibdev->usdev_lock); WARN_ON_ONCE(!list_empty(&context->qp_grp_list)); list_del(&context->link); mutex_unlock(&us_ibdev->usdev_lock); } int usnic_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct usnic_ib_ucontext *uctx = to_ucontext(context); struct usnic_ib_dev *us_ibdev; struct usnic_ib_qp_grp *qp_grp; struct usnic_ib_vf *vf; struct vnic_dev_bar *bar; dma_addr_t bus_addr; unsigned int len; unsigned int vfid; usnic_dbg("\n"); us_ibdev = to_usdev(context->device); vm_flags_set(vma, VM_IO); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vfid = vma->vm_pgoff; usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n", vma->vm_pgoff, PAGE_SHIFT, vfid); mutex_lock(&us_ibdev->usdev_lock); list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) { vf = qp_grp->vf; if (usnic_vnic_get_index(vf->vnic) == vfid) { bar = usnic_vnic_get_bar(vf->vnic, 0); if ((vma->vm_end - vma->vm_start) != bar->len) { usnic_err("Bar0 Len %lu - Request map %lu\n", bar->len, vma->vm_end - vma->vm_start); mutex_unlock(&us_ibdev->usdev_lock); return -EINVAL; } bus_addr = bar->bus_addr; len = bar->len; usnic_dbg("bus: %pa vaddr: %p size: %ld\n", &bus_addr, bar->vaddr, bar->len); mutex_unlock(&us_ibdev->usdev_lock); return remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, len, vma->vm_page_prot); } } mutex_unlock(&us_ibdev->usdev_lock); usnic_err("No VF %u found\n", vfid); return -EINVAL; }
linux-master
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
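Among the verbs in the usnic_ib_verbs.c record above, usnic_ib_query_port derives the reported IB port state from two pieces of forwarding-device state: DOWN while the link is down, INIT when the link is up but no IP address is set, and ACTIVE once both are present. A minimal sketch of that mapping as a pure function, with a local enum standing in for the IB core's port-state values:

/* Minimal sketch of the port-state decision in usnic_ib_query_port().
 * demo_port_state is a local stand-in, not the IB core's enum.
 */
#include <stdint.h>
#include <stdio.h>

enum demo_port_state { DEMO_PORT_DOWN, DEMO_PORT_INIT, DEMO_PORT_ACTIVE };

static enum demo_port_state demo_query_port(int link_up, uint32_t inaddr)
{
	if (!link_up)
		return DEMO_PORT_DOWN;		/* no carrier */
	if (!inaddr)
		return DEMO_PORT_INIT;		/* carrier but no IP yet */
	return DEMO_PORT_ACTIVE;		/* carrier and IP configured */
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_query_port(0, 0),		/* DOWN   */
	       demo_query_port(1, 0),		/* INIT   */
	       demo_query_port(1, 0x0a000001));	/* ACTIVE */
	return 0;
}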
/* * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/list_sort.h> #include <linux/interval_tree_generic.h> #include "usnic_uiom_interval_tree.h" #define START(node) ((node)->start) #define LAST(node) ((node)->last) #define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \ do { \ node = usnic_uiom_interval_node_alloc(start, \ end, ref_cnt, flags); \ if (!node) { \ err = -ENOMEM; \ goto err_out; \ } \ } while (0) #define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list)) #define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \ err_out, list) \ do { \ MAKE_NODE(node, start, end, \ ref_cnt, flags, err, \ err_out); \ MARK_FOR_ADD(node, list); \ } while (0) #define FLAGS_EQUAL(flags1, flags2, mask) \ (((flags1) & (mask)) == ((flags2) & (mask))) static struct usnic_uiom_interval_node* usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt, int flags) { struct usnic_uiom_interval_node *interval = kzalloc(sizeof(*interval), GFP_ATOMIC); if (!interval) return NULL; interval->start = start; interval->last = last; interval->flags = flags; interval->ref_cnt = ref_cnt; return interval; } static int interval_cmp(void *priv, const struct list_head *a, const struct list_head *b) { struct usnic_uiom_interval_node *node_a, *node_b; node_a = list_entry(a, struct usnic_uiom_interval_node, link); node_b = list_entry(b, struct usnic_uiom_interval_node, link); /* long to int */ if (node_a->start < node_b->start) return -1; else if (node_a->start > node_b->start) return 1; return 0; } static void find_intervals_intersection_sorted(struct rb_root_cached *root, unsigned long start, unsigned long last, struct list_head *list) { struct usnic_uiom_interval_node *node; INIT_LIST_HEAD(list); for (node = usnic_uiom_interval_tree_iter_first(root, start, last); node; node = usnic_uiom_interval_tree_iter_next(node, start, last)) list_add_tail(&node->link, list); list_sort(NULL, list, interval_cmp); } int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last, int flags, int flag_mask, struct rb_root_cached *root, struct list_head *diff_set) { struct usnic_uiom_interval_node *interval, *tmp; int err = 0; long int pivot 
= start; LIST_HEAD(intersection_set); INIT_LIST_HEAD(diff_set); find_intervals_intersection_sorted(root, start, last, &intersection_set); list_for_each_entry(interval, &intersection_set, link) { if (pivot < interval->start) { MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1, 1, flags, err, err_out, diff_set); pivot = interval->start; } /* * Invariant: Set [start, pivot] is either in diff_set or root, * but not in both. */ if (pivot > interval->last) { continue; } else if (pivot <= interval->last && FLAGS_EQUAL(interval->flags, flags, flag_mask)) { pivot = interval->last + 1; } } if (pivot <= last) MAKE_NODE_AND_APPEND(tmp, pivot, last, 1, flags, err, err_out, diff_set); return 0; err_out: list_for_each_entry_safe(interval, tmp, diff_set, link) { list_del(&interval->link); kfree(interval); } return err; } void usnic_uiom_put_interval_set(struct list_head *intervals) { struct usnic_uiom_interval_node *interval, *tmp; list_for_each_entry_safe(interval, tmp, intervals, link) kfree(interval); } int usnic_uiom_insert_interval(struct rb_root_cached *root, unsigned long start, unsigned long last, int flags) { struct usnic_uiom_interval_node *interval, *tmp; unsigned long istart, ilast; int iref_cnt, iflags; unsigned long lpivot = start; int err = 0; LIST_HEAD(to_add); LIST_HEAD(intersection_set); find_intervals_intersection_sorted(root, start, last, &intersection_set); list_for_each_entry(interval, &intersection_set, link) { /* * Invariant - lpivot is the left edge of next interval to be * inserted */ istart = interval->start; ilast = interval->last; iref_cnt = interval->ref_cnt; iflags = interval->flags; if (istart < lpivot) { MAKE_NODE_AND_APPEND(tmp, istart, lpivot - 1, iref_cnt, iflags, err, err_out, &to_add); } else if (istart > lpivot) { MAKE_NODE_AND_APPEND(tmp, lpivot, istart - 1, 1, flags, err, err_out, &to_add); lpivot = istart; } else { lpivot = istart; } if (ilast > last) { MAKE_NODE_AND_APPEND(tmp, lpivot, last, iref_cnt + 1, iflags | flags, err, err_out, &to_add); MAKE_NODE_AND_APPEND(tmp, last + 1, ilast, iref_cnt, iflags, err, err_out, &to_add); } else { MAKE_NODE_AND_APPEND(tmp, lpivot, ilast, iref_cnt + 1, iflags | flags, err, err_out, &to_add); } lpivot = ilast + 1; } if (lpivot <= last) MAKE_NODE_AND_APPEND(tmp, lpivot, last, 1, flags, err, err_out, &to_add); list_for_each_entry_safe(interval, tmp, &intersection_set, link) { usnic_uiom_interval_tree_remove(interval, root); kfree(interval); } list_for_each_entry(interval, &to_add, link) usnic_uiom_interval_tree_insert(interval, root); return 0; err_out: list_for_each_entry_safe(interval, tmp, &to_add, link) kfree(interval); return err; } void usnic_uiom_remove_interval(struct rb_root_cached *root, unsigned long start, unsigned long last, struct list_head *removed) { struct usnic_uiom_interval_node *interval; for (interval = usnic_uiom_interval_tree_iter_first(root, start, last); interval; interval = usnic_uiom_interval_tree_iter_next(interval, start, last)) { if (--interval->ref_cnt == 0) list_add_tail(&interval->link, removed); } list_for_each_entry(interval, removed, link) usnic_uiom_interval_tree_remove(interval, root); } INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb, unsigned long, __subtree_last, START, LAST, , usnic_uiom_interval_tree)
linux-master
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
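usnic_uiom_get_intervals_diff in the record above walks the sorted set of intervals that intersect a requested range and emits nodes for the sub-ranges that are not yet covered, advancing a pivot past each existing interval. The sketch below reproduces just that walk over a plain sorted array so the pivot logic is easy to follow; it is a simplified illustration with invented names, not the driver's rbtree-backed implementation.

/* Sketch of the "diff" walk: report the parts of [start, last] not covered
 * by a sorted list of existing intervals. Array-based stand-in only.
 */
#include <stdio.h>

struct demo_ival { unsigned long start, last; };

static void demo_diff(unsigned long start, unsigned long last,
		      const struct demo_ival *existing, int n)
{
	unsigned long pivot = start;
	int i;

	for (i = 0; i < n; i++) {
		if (existing[i].last < start || existing[i].start > last)
			continue;		/* no overlap with the query */
		if (pivot < existing[i].start)
			printf("missing [%lu, %lu]\n",
			       pivot, existing[i].start - 1);
		if (existing[i].last + 1 > pivot)
			pivot = existing[i].last + 1;
	}
	if (pivot <= last)
		printf("missing [%lu, %lu]\n", pivot, last);
}

int main(void)
{
	/* Already-covered ranges, sorted by start. */
	const struct demo_ival existing[] = { { 10, 19 }, { 30, 34 } };

	/* Query [5, 40] -> missing [5, 9], [20, 29], [35, 40]. */
	demo_diff(5, 40, existing, 2);
	return 0;
}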
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "type.h" #include "i40iw_hw.h" #include "protos.h" static u32 i40iw_regs[IRDMA_MAX_REGS] = { I40E_PFPE_CQPTAIL, I40E_PFPE_CQPDB, I40E_PFPE_CCQPSTATUS, I40E_PFPE_CCQPHIGH, I40E_PFPE_CCQPLOW, I40E_PFPE_CQARM, I40E_PFPE_CQACK, I40E_PFPE_AEQALLOC, I40E_PFPE_CQPERRCODES, I40E_PFPE_WQEALLOC, I40E_PFINT_DYN_CTLN(0), I40IW_DB_ADDR_OFFSET, I40E_GLPCI_LBARCTRL, I40E_GLPE_CPUSTATUS0, I40E_GLPE_CPUSTATUS1, I40E_GLPE_CPUSTATUS2, I40E_PFINT_AEQCTL, I40E_PFINT_CEQCTL(0), I40E_VSIQF_CTL(0), I40E_PFHMC_PDINV, I40E_GLHMC_VFPDINV(0), I40E_GLPE_CRITERR, 0xffffffff /* PFINT_RATEN not used in FPK */ }; static u32 i40iw_stat_offsets[] = { I40E_GLPES_PFIP4RXDISCARD(0), I40E_GLPES_PFIP4RXTRUNC(0), I40E_GLPES_PFIP4TXNOROUTE(0), I40E_GLPES_PFIP6RXDISCARD(0), I40E_GLPES_PFIP6RXTRUNC(0), I40E_GLPES_PFIP6TXNOROUTE(0), I40E_GLPES_PFTCPRTXSEG(0), I40E_GLPES_PFTCPRXOPTERR(0), I40E_GLPES_PFTCPRXPROTOERR(0), I40E_GLPES_PFRXVLANERR(0), I40E_GLPES_PFIP4RXOCTSLO(0), I40E_GLPES_PFIP4RXPKTSLO(0), I40E_GLPES_PFIP4RXFRAGSLO(0), I40E_GLPES_PFIP4RXMCPKTSLO(0), I40E_GLPES_PFIP4TXOCTSLO(0), I40E_GLPES_PFIP4TXPKTSLO(0), I40E_GLPES_PFIP4TXFRAGSLO(0), I40E_GLPES_PFIP4TXMCPKTSLO(0), I40E_GLPES_PFIP6RXOCTSLO(0), I40E_GLPES_PFIP6RXPKTSLO(0), I40E_GLPES_PFIP6RXFRAGSLO(0), I40E_GLPES_PFIP6RXMCPKTSLO(0), I40E_GLPES_PFIP6TXOCTSLO(0), I40E_GLPES_PFIP6TXPKTSLO(0), I40E_GLPES_PFIP6TXFRAGSLO(0), I40E_GLPES_PFIP6TXMCPKTSLO(0), I40E_GLPES_PFTCPRXSEGSLO(0), I40E_GLPES_PFTCPTXSEGLO(0), I40E_GLPES_PFRDMARXRDSLO(0), I40E_GLPES_PFRDMARXSNDSLO(0), I40E_GLPES_PFRDMARXWRSLO(0), I40E_GLPES_PFRDMATXRDSLO(0), I40E_GLPES_PFRDMATXSNDSLO(0), I40E_GLPES_PFRDMATXWRSLO(0), I40E_GLPES_PFRDMAVBNDLO(0), I40E_GLPES_PFRDMAVINVLO(0), I40E_GLPES_PFIP4RXMCOCTSLO(0), I40E_GLPES_PFIP4TXMCOCTSLO(0), I40E_GLPES_PFIP6RXMCOCTSLO(0), I40E_GLPES_PFIP6TXMCOCTSLO(0), I40E_GLPES_PFUDPRXPKTSLO(0), I40E_GLPES_PFUDPTXPKTSLO(0) }; static u64 i40iw_masks[IRDMA_MAX_MASKS] = { I40E_PFPE_CCQPSTATUS_CCQP_DONE, I40E_PFPE_CCQPSTATUS_CCQP_ERR, I40E_CQPSQ_STAG_PDID, I40E_CQPSQ_CQ_CEQID, I40E_CQPSQ_CQ_CQID, I40E_COMMIT_FPM_CQCNT, }; static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = { I40E_PFPE_CCQPSTATUS_CCQP_DONE_S, I40E_PFPE_CCQPSTATUS_CCQP_ERR_S, I40E_CQPSQ_STAG_PDID_S, I40E_CQPSQ_CQ_CEQID_S, I40E_CQPSQ_CQ_CQID_S, I40E_COMMIT_FPM_CQCNT_S, }; /** * i40iw_config_ceq- Configure CEQ interrupt * @dev: pointer to the device structure * @ceq_id: Completion Event Queue ID * @idx: vector index * @enable: Enable CEQ interrupt when true */ static void i40iw_config_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx, bool enable) { u32 reg_val; reg_val = FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_INDX, ceq_id) | FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_TYPE, QUEUE_TYPE_CEQ); wr32(dev->hw, I40E_PFINT_LNKLSTN(idx - 1), reg_val); reg_val = FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX, 0x3) | FIELD_PREP(I40E_PFINT_DYN_CTLN_INTENA, 0x1); wr32(dev->hw, I40E_PFINT_DYN_CTLN(idx - 1), reg_val); reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) | FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) | FIELD_PREP(I40E_PFINT_CEQCTL_NEXTQ_INDX, NULL_QUEUE_INDEX) | FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 0x3); wr32(dev->hw, i40iw_regs[IRDMA_GLINT_CEQCTL] + 4 * ceq_id, reg_val); } /** * i40iw_ena_irq - Enable interrupt * @dev: pointer to the device structure * @idx: vector index */ static void i40iw_ena_irq(struct irdma_sc_dev *dev, u32 idx) { u32 val; val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 0x1) | 
FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 0x1) | FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0x3); wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), val); } /** * i40iw_disable_irq - Disable interrupt * @dev: pointer to the device structure * @idx: vector index */ static void i40iw_disable_irq(struct irdma_sc_dev *dev, u32 idx) { wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), 0); } static const struct irdma_irq_ops i40iw_irq_ops = { .irdma_cfg_aeq = irdma_cfg_aeq, .irdma_cfg_ceq = i40iw_config_ceq, .irdma_dis_irq = i40iw_disable_irq, .irdma_en_irq = i40iw_ena_irq, }; static const struct irdma_hw_stat_map i40iw_hw_stat_map[] = { [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 0, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 80, 0, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 88, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 128, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 136, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 144, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 152, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 160, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 168, 0, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 176, 0, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 184, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 192, 0, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 224, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 232, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 240, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 248, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 256, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 264, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 272, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 280, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 288, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 296, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 304, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 312, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 320, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 328, 0, IRDMA_MAX_STATS_48 }, }; void i40iw_init_hw(struct irdma_sc_dev *dev) { int i; u8 __iomem *hw_addr; for (i = 0; i < 
IRDMA_MAX_REGS; ++i) { hw_addr = dev->hw->hw_addr; if (i == IRDMA_DB_ADDR_OFFSET) hw_addr = NULL; dev->hw_regs[i] = (u32 __iomem *)(i40iw_regs[i] + hw_addr); } for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_GEN_1; ++i) dev->hw_stats_regs[i] = i40iw_stat_offsets[i]; dev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID; dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID; for (i = 0; i < IRDMA_MAX_SHIFTS; ++i) dev->hw_shifts[i] = i40iw_shifts[i]; for (i = 0; i < IRDMA_MAX_MASKS; ++i) dev->hw_masks[i] = i40iw_masks[i]; dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC]; dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM]; dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC]; dev->cqp_db = dev->hw_regs[IRDMA_CQPDB]; dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK]; dev->ceq_itr_mask_db = NULL; dev->aeq_itr_mask_db = NULL; dev->irq_ops = &i40iw_irq_ops; dev->hw_stats_map = i40iw_hw_stat_map; /* Setup the hardware limits, hmc may limit further */ dev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT; dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD; dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT; dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE; dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M; dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE; dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE; dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES; dev->hw_attrs.uk_attrs.max_hw_rq_quanta = I40IW_QP_SW_MAX_RQ_QUANTA; dev->hw_attrs.uk_attrs.max_hw_wq_quanta = I40IW_QP_SW_MAX_WQ_QUANTA; dev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR; dev->hw_attrs.max_hw_pds = I40IW_MAX_PDS; dev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT; dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_1; dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE; dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE; dev->hw_attrs.uk_attrs.min_hw_wq_size = I40IW_MIN_WQ_SIZE; dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS; }
linux-master
drivers/infiniband/hw/irdma/i40iw_hw.c
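The i40iw_hw.c record above describes each hardware statistic with a register offset and a width marker (IRDMA_MAX_STATS_24/32/48), so the raw counters wrap at 24, 32 or 48 bits. The accumulation that copes with the wrap lives elsewhere in the driver; the sketch below only shows the generic rollover-safe pattern such widths imply, with invented names, as a hedged illustration rather than irdma's exact code.

/* Rollover-safe accumulation for counters of a given bit width: the delta
 * since the last read is taken modulo 2^width, so a wrapped counter still
 * yields the correct increment.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t width_mask(unsigned int bits)
{
	return (bits >= 64) ? ~0ULL : ((1ULL << bits) - 1);
}

/* Add the wrapped delta between two raw readings to a 64-bit running total. */
static void demo_accumulate(uint64_t *total, uint64_t *last_raw,
			    uint64_t new_raw, unsigned int bits)
{
	uint64_t mask = width_mask(bits);

	*total += (new_raw - *last_raw) & mask;
	*last_raw = new_raw & mask;
}

int main(void)
{
	uint64_t total = 0, last = 0xfffff0;	/* near the top of 24 bits */

	demo_accumulate(&total, &last, 0x000010, 24);	/* counter wrapped */
	printf("delta across the wrap: %llu\n", (unsigned long long)total);
	return 0;					/* prints 32 */
}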
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2019 Intel Corporation */
#define CREATE_TRACE_POINTS
#include "trace.h"

const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4)
{
	const char *ret = trace_seq_buffer_ptr(p);

	if (ipv4) {
		__be32 myaddr = htonl(*addr);

		trace_seq_printf(p, "%pI4:%d", &myaddr, htons(port));
	} else {
		trace_seq_printf(p, "%pI6:%d", addr, htons(port));
	}
	trace_seq_putc(p, 0);

	return ret;
}

const char *parse_iw_event_type(enum iw_cm_event_type iw_type)
{
	switch (iw_type) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		return "IwRequest";
	case IW_CM_EVENT_CONNECT_REPLY:
		return "IwReply";
	case IW_CM_EVENT_ESTABLISHED:
		return "IwEstablished";
	case IW_CM_EVENT_DISCONNECT:
		return "IwDisconnect";
	case IW_CM_EVENT_CLOSE:
		return "IwClose";
	}

	return "Unknown";
}

const char *parse_cm_event_type(enum irdma_cm_event_type cm_type)
{
	switch (cm_type) {
	case IRDMA_CM_EVENT_ESTABLISHED:
		return "CmEstablished";
	case IRDMA_CM_EVENT_MPA_REQ:
		return "CmMPA_REQ";
	case IRDMA_CM_EVENT_MPA_CONNECT:
		return "CmMPA_CONNECT";
	case IRDMA_CM_EVENT_MPA_ACCEPT:
		return "CmMPA_ACCEPT";
	case IRDMA_CM_EVENT_MPA_REJECT:
		return "CmMPA_REJECT";
	case IRDMA_CM_EVENT_MPA_ESTABLISHED:
		return "CmMPA_ESTABLISHED";
	case IRDMA_CM_EVENT_CONNECTED:
		return "CmConnected";
	case IRDMA_CM_EVENT_RESET:
		return "CmReset";
	case IRDMA_CM_EVENT_ABORTED:
		return "CmAborted";
	case IRDMA_CM_EVENT_UNKNOWN:
		return "none";
	}

	return "Unknown";
}

const char *parse_cm_state(enum irdma_cm_node_state state)
{
	switch (state) {
	case IRDMA_CM_STATE_UNKNOWN:
		return "UNKNOWN";
	case IRDMA_CM_STATE_INITED:
		return "INITED";
	case IRDMA_CM_STATE_LISTENING:
		return "LISTENING";
	case IRDMA_CM_STATE_SYN_RCVD:
		return "SYN_RCVD";
	case IRDMA_CM_STATE_SYN_SENT:
		return "SYN_SENT";
	case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
		return "ONE_SIDE_ESTABLISHED";
	case IRDMA_CM_STATE_ESTABLISHED:
		return "ESTABLISHED";
	case IRDMA_CM_STATE_ACCEPTING:
		return "ACCEPTING";
	case IRDMA_CM_STATE_MPAREQ_SENT:
		return "MPAREQ_SENT";
	case IRDMA_CM_STATE_MPAREQ_RCVD:
		return "MPAREQ_RCVD";
	case IRDMA_CM_STATE_MPAREJ_RCVD:
		return "MPAREJ_RECVD";
	case IRDMA_CM_STATE_OFFLOADED:
		return "OFFLOADED";
	case IRDMA_CM_STATE_FIN_WAIT1:
		return "FIN_WAIT1";
	case IRDMA_CM_STATE_FIN_WAIT2:
		return "FIN_WAIT2";
	case IRDMA_CM_STATE_CLOSE_WAIT:
		return "CLOSE_WAIT";
	case IRDMA_CM_STATE_TIME_WAIT:
		return "TIME_WAIT";
	case IRDMA_CM_STATE_LAST_ACK:
		return "LAST_ACK";
	case IRDMA_CM_STATE_CLOSING:
		return "CLOSING";
	case IRDMA_CM_STATE_LISTENER_DESTROYED:
		return "LISTENER_DESTROYED";
	case IRDMA_CM_STATE_CLOSED:
		return "CLOSED";
	}

	return ("Bad state");
}
linux-master
drivers/infiniband/hw/irdma/trace.c
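The trace.c record above maps enum values to printable names with switch statements, which stays correct even if the enum values are sparse or later reordered. When the values are small and dense, a designated-initializer string table is a common alternative; the sketch below shows that variant with an invented enum, purely as a design-choice illustration, not a suggestion that the driver's helpers should change.

/* Enum-to-string lookup via a designated-initializer table. All names are
 * local stand-ins for illustration only.
 */
#include <stdio.h>

enum demo_cm_event { DEMO_CM_ESTABLISHED, DEMO_CM_RESET, DEMO_CM_ABORTED,
		     DEMO_CM_MAX };

static const char *const demo_cm_event_names[DEMO_CM_MAX] = {
	[DEMO_CM_ESTABLISHED]	= "CmEstablished",
	[DEMO_CM_RESET]		= "CmReset",
	[DEMO_CM_ABORTED]	= "CmAborted",
};

static const char *demo_cm_event_name(enum demo_cm_event ev)
{
	/* Out-of-range or unpopulated entries fall back to "Unknown". */
	if ((unsigned int)ev >= DEMO_CM_MAX || !demo_cm_event_names[ev])
		return "Unknown";
	return demo_cm_event_names[ev];
}

int main(void)
{
	printf("%s %s\n", demo_cm_event_name(DEMO_CM_RESET),
	       demo_cm_event_name(DEMO_CM_MAX));
	return 0;
}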
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2016 - 2021 Intel Corporation */ #include <linux/etherdevice.h> #include "osdep.h" #include "hmc.h" #include "defs.h" #include "type.h" #include "protos.h" #include "uda.h" #include "uda_d.h" /** * irdma_sc_access_ah() - Create, modify or delete AH * @cqp: struct for cqp hw * @info: ah information * @op: Operation * @scratch: u64 saved to be used during cqp completion */ int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info, u32 op, u64 scratch) { __le64 *wqe; u64 qw1, qw2; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16); qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) | FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag); qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16); if (!info->ipv4_valid) { set_64bit_val(wqe, 40, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1])); set_64bit_val(wqe, 32, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3])); set_64bit_val(wqe, 56, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1])); set_64bit_val(wqe, 48, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3])); } else { set_64bit_val(wqe, 32, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0])); set_64bit_val(wqe, 48, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0])); } set_64bit_val(wqe, 8, qw1); set_64bit_val(wqe, 16, qw2); dma_wmb(); /* need write block before writing WQE header */ set_64bit_val( wqe, 24, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag)); print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_create_mg_ctx() - create a mcg context * @info: multicast group context info */ static void irdma_create_mg_ctx(struct irdma_mcast_grp_info *info) { struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL; u8 idx = 0; /* index in the array */ u8 ctx_idx = 0; /* index in the MG context */ memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64)); for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) { entry_info = &info->mg_ctx_info[idx]; if (entry_info->valid_entry) { set_64bit_val((__le64 *)info->dma_mem_mc.va, ctx_idx * sizeof(u64), FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) | FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) | FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id)); ctx_idx++; } } } /** * irdma_access_mcast_grp() - Access mcast group based on op * @cqp: Control QP * @info: multicast group context info * @op: operation to perform * @scratch: u64 saved to be used during cqp completion */ int 
irdma_access_mcast_grp(struct irdma_sc_cqp *cqp, struct irdma_mcast_grp_info *info, u32 op, u64 scratch) { __le64 *wqe; if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) { ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n"); return -EINVAL; } wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) { ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n"); return -ENOMEM; } irdma_create_mg_ctx(info); set_64bit_val(wqe, 32, info->dma_mem_mc.pa); set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) | FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle)); set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr)); set_64bit_val(wqe, 8, FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id)); if (!info->ipv4_valid) { set_64bit_val(wqe, 56, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1])); set_64bit_val(wqe, 48, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) | FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3])); } else { set_64bit_val(wqe, 48, FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0])); } dma_wmb(); /* need write memory block before writing the WQE header. */ set_64bit_val(wqe, 24, FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) | FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) | FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) | FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid)); print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, info->dma_mem_mc.va, IRDMA_MAX_MGS_PER_CTX * 8, false); irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_compare_mgs - Compares two multicast group structures * @entry1: Multcast group info * @entry2: Multcast group info in context */ static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1, struct irdma_mcast_grp_ctx_entry_info *entry2) { if (entry1->dest_port == entry2->dest_port && entry1->qp_id == entry2->qp_id) return true; return false; } /** * irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx * @ctx: Multcast group context * @mg: Multcast group info */ int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, struct irdma_mcast_grp_ctx_entry_info *mg) { u32 idx; bool free_entry_found = false; u32 free_entry_idx = 0; /* find either an identical or a free entry for a multicast group */ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) { if (ctx->mg_ctx_info[idx].valid_entry) { if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) { ctx->mg_ctx_info[idx].use_cnt++; return 0; } continue; } if (!free_entry_found) { free_entry_found = true; free_entry_idx = idx; } } if (free_entry_found) { ctx->mg_ctx_info[free_entry_idx] = *mg; ctx->mg_ctx_info[free_entry_idx].valid_entry = true; ctx->mg_ctx_info[free_entry_idx].use_cnt = 1; ctx->no_of_mgs++; return 0; } return -ENOMEM; } /** * irdma_sc_del_mcast_grp - Delete mcast group * @ctx: Multcast group context * @mg: Multcast group info * * Finds and removes a specific mulicast group from context, all * parameters must match to remove a multicast group. 
*/ int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx, struct irdma_mcast_grp_ctx_entry_info *mg) { u32 idx; /* find an entry in multicast group context */ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) { if (!ctx->mg_ctx_info[idx].valid_entry) continue; if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) { ctx->mg_ctx_info[idx].use_cnt--; if (!ctx->mg_ctx_info[idx].use_cnt) { ctx->mg_ctx_info[idx].valid_entry = false; ctx->no_of_mgs--; /* Remove gap if element was not the last */ if (idx != ctx->no_of_mgs && ctx->no_of_mgs > 0) { memcpy(&ctx->mg_ctx_info[idx], &ctx->mg_ctx_info[ctx->no_of_mgs - 1], sizeof(ctx->mg_ctx_info[idx])); ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false; } } return 0; } } return -EINVAL; }
linux-master
drivers/infiniband/hw/irdma/uda.c
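irdma_sc_access_ah and irdma_access_mcast_grp in the uda.c record above assemble each 64-bit WQE word by OR-ing together FIELD_PREP() terms, each of which shifts a value into the bit positions selected by a mask. The sketch below reimplements that packing idea in userspace; demo_field_prep is a stand-in for the kernel macro and the DEMO_WQE_* masks are invented layouts, not the real IRDMA_UDA_CQPSQ_* fields.

/* FIELD_PREP-style packing: shift a value into the position given by a
 * contiguous bit mask, then OR the fields into one 64-bit word.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_field_prep(uint64_t mask, uint64_t val)
{
	/* __builtin_ctzll returns the index of the mask's lowest set bit. */
	return (val << __builtin_ctzll(mask)) & mask;
}

/* Illustrative field layouts only. */
#define DEMO_WQE_OPCODE		0x000000000000003fULL	/* bits  5:0  */
#define DEMO_WQE_VALID		0x0000000000000080ULL	/* bit   7    */
#define DEMO_WQE_AH_IDX		0x00000000ffff0000ULL	/* bits 31:16 */

int main(void)
{
	uint64_t hdr = demo_field_prep(DEMO_WQE_OPCODE, 0x13) |
		       demo_field_prep(DEMO_WQE_VALID, 1) |
		       demo_field_prep(DEMO_WQE_AH_IDX, 0x2a);

	/* Prints 0x00000000002a0093. */
	printf("wqe header word: 0x%016llx\n", (unsigned long long)hdr);
	return 0;
}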
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" static struct irdma_rsrc_limits rsrc_limits_table[] = { [0] = { .qplimit = SZ_128, }, [1] = { .qplimit = SZ_1K, }, [2] = { .qplimit = SZ_2K, }, [3] = { .qplimit = SZ_4K, }, [4] = { .qplimit = SZ_16K, }, [5] = { .qplimit = SZ_64K, }, [6] = { .qplimit = SZ_128K, }, [7] = { .qplimit = SZ_256K, }, }; /* types of hmc objects */ static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = { IRDMA_HMC_IW_QP, IRDMA_HMC_IW_CQ, IRDMA_HMC_IW_HTE, IRDMA_HMC_IW_ARP, IRDMA_HMC_IW_APBVT_ENTRY, IRDMA_HMC_IW_MR, IRDMA_HMC_IW_XF, IRDMA_HMC_IW_XFFL, IRDMA_HMC_IW_Q1, IRDMA_HMC_IW_Q1FL, IRDMA_HMC_IW_PBLE, IRDMA_HMC_IW_TIMER, IRDMA_HMC_IW_FSIMC, IRDMA_HMC_IW_FSIAV, IRDMA_HMC_IW_RRF, IRDMA_HMC_IW_RRFFL, IRDMA_HMC_IW_HDR, IRDMA_HMC_IW_MD, IRDMA_HMC_IW_OOISC, IRDMA_HMC_IW_OOISCFFL, }; /** * irdma_iwarp_ce_handler - handle iwarp completions * @iwcq: iwarp cq receiving event */ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq) { struct irdma_cq *cq = iwcq->back_cq; if (!cq->user_mode) atomic_set(&cq->armed, 0); if (cq->ibcq.comp_handler) cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } /** * irdma_puda_ce_handler - handle puda completion events * @rf: RDMA PCI function * @cq: puda completion q for event */ static void irdma_puda_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) { struct irdma_sc_dev *dev = &rf->sc_dev; u32 compl_error; int status; do { status = irdma_puda_poll_cmpl(dev, cq, &compl_error); if (status == -ENOENT) break; if (status) { ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status); break; } if (compl_error) { ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err =0x%x\n", compl_error); break; } } while (1); irdma_sc_ccq_arm(cq); } /** * irdma_process_ceq - handle ceq for completions * @rf: RDMA PCI function * @ceq: ceq having cq for completion */ static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_sc_ceq *sc_ceq; struct irdma_sc_cq *cq; unsigned long flags; sc_ceq = &ceq->sc_ceq; do { spin_lock_irqsave(&ceq->ce_lock, flags); cq = irdma_sc_process_ceq(dev, sc_ceq); if (!cq) { spin_unlock_irqrestore(&ceq->ce_lock, flags); break; } if (cq->cq_type == IRDMA_CQ_TYPE_IWARP) irdma_iwarp_ce_handler(cq); spin_unlock_irqrestore(&ceq->ce_lock, flags); if (cq->cq_type == IRDMA_CQ_TYPE_CQP) queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work); else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ || cq->cq_type == IRDMA_CQ_TYPE_IEQ) irdma_puda_ce_handler(rf, cq); } while (1); } static void irdma_set_flush_fields(struct irdma_sc_qp *qp, struct irdma_aeqe_info *info) { qp->sq_flush_code = info->sq; qp->rq_flush_code = info->rq; qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; switch (info->ae_id) { case IRDMA_AE_AMP_BOUNDS_VIOLATION: case IRDMA_AE_AMP_INVALID_STAG: case IRDMA_AE_AMP_RIGHTS_VIOLATION: case IRDMA_AE_AMP_UNALLOCATED_STAG: case IRDMA_AE_AMP_BAD_PD: case IRDMA_AE_AMP_BAD_QP: case IRDMA_AE_AMP_BAD_STAG_KEY: case IRDMA_AE_AMP_BAD_STAG_INDEX: case IRDMA_AE_AMP_TO_WRAP: case IRDMA_AE_PRIV_OPERATION_DENIED: qp->flush_code = FLUSH_PROT_ERR; qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; case IRDMA_AE_UDA_XMIT_BAD_PD: case IRDMA_AE_WQE_UNEXPECTED_OPCODE: qp->flush_code = FLUSH_LOC_QP_OP_ERR; qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: case IRDMA_AE_UDA_L4LEN_INVALID: case IRDMA_AE_DDP_UBE_INVALID_MO: case 
IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: qp->flush_code = FLUSH_LOC_LEN_ERR; qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: qp->flush_code = FLUSH_REM_ACCESS_ERR; qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: case IRDMA_AE_IB_REMOTE_OP_ERROR: qp->flush_code = FLUSH_REM_OP_ERR; qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_LCE_QP_CATASTROPHIC: qp->flush_code = FLUSH_FATAL_ERR; qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_IB_RREQ_AND_Q1_FULL: qp->flush_code = FLUSH_GENERAL_ERR; break; case IRDMA_AE_LLP_TOO_MANY_RETRIES: qp->flush_code = FLUSH_RETRY_EXC_ERR; qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: case IRDMA_AE_AMP_MWBIND_VALID_STAG: qp->flush_code = FLUSH_MW_BIND_ERR; qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; case IRDMA_AE_IB_INVALID_REQUEST: qp->flush_code = FLUSH_REM_INV_REQ_ERR; qp->event_type = IRDMA_QP_EVENT_REQ_ERR; break; default: qp->flush_code = FLUSH_GENERAL_ERR; qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; } } /** * irdma_process_aeq - handle aeq events * @rf: RDMA PCI function */ static void irdma_process_aeq(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_aeq *aeq = &rf->aeq; struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq; struct irdma_aeqe_info aeinfo; struct irdma_aeqe_info *info = &aeinfo; int ret; struct irdma_qp *iwqp = NULL; struct irdma_cq *iwcq = NULL; struct irdma_sc_qp *qp = NULL; struct irdma_qp_host_ctx_info *ctx_info = NULL; struct irdma_device *iwdev = rf->iwdev; unsigned long flags; u32 aeqcnt = 0; if (!sc_aeq->size) return; do { memset(info, 0, sizeof(*info)); ret = irdma_sc_get_next_aeqe(sc_aeq, info); if (ret) break; aeqcnt++; ibdev_dbg(&iwdev->ibdev, "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n", info->ae_id, info->qp, info->qp_cq_id, info->tcp_state, info->iwarp_state, info->ae_src); if (info->qp) { spin_lock_irqsave(&rf->qptable_lock, flags); iwqp = rf->qp_table[info->qp_cq_id]; if (!iwqp) { spin_unlock_irqrestore(&rf->qptable_lock, flags); if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) { atomic_dec(&iwdev->vsi.qp_suspend_reqs); wake_up(&iwdev->suspend_wq); continue; } ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n", info->qp_cq_id); continue; } irdma_qp_add_ref(&iwqp->ibqp); spin_unlock_irqrestore(&rf->qptable_lock, flags); qp = &iwqp->sc_qp; spin_lock_irqsave(&iwqp->lock, flags); iwqp->hw_tcp_state = info->tcp_state; iwqp->hw_iwarp_state = info->iwarp_state; if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE) iwqp->last_aeq = info->ae_id; spin_unlock_irqrestore(&iwqp->lock, flags); ctx_info = &iwqp->ctx_info; } else { if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR) continue; } switch (info->ae_id) { struct irdma_cm_node *cm_node; case IRDMA_AE_LLP_CONNECTION_ESTABLISHED: cm_node = iwqp->cm_node; if (cm_node->accept_pend) { atomic_dec(&cm_node->listener->pend_accepts_cnt); cm_node->accept_pend = 0; } iwqp->rts_ae_rcvd = 1; wake_up_interruptible(&iwqp->waitq); break; case IRDMA_AE_LLP_FIN_RECEIVED: case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE: if (qp->term_flags) break; if (atomic_inc_return(&iwqp->close_timer_started) == 1) { iwqp->hw_tcp_state = 
IRDMA_TCP_STATE_CLOSE_WAIT; if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT && iwqp->ibqp_state == IB_QPS_RTS) { irdma_next_iw_state(iwqp, IRDMA_QP_STATE_CLOSING, 0, 0, 0); irdma_cm_disconn(iwqp); } irdma_schedule_cm_timer(iwqp->cm_node, (struct irdma_puda_buf *)iwqp, IRDMA_TIMER_TYPE_CLOSE, 1, 0); } break; case IRDMA_AE_LLP_CLOSE_COMPLETE: if (qp->term_flags) irdma_terminate_done(qp, 0); else irdma_cm_disconn(iwqp); break; case IRDMA_AE_BAD_CLOSE: case IRDMA_AE_RESET_SENT: irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0); irdma_cm_disconn(iwqp); break; case IRDMA_AE_LLP_CONNECTION_RESET: if (atomic_read(&iwqp->close_timer_started)) break; irdma_cm_disconn(iwqp); break; case IRDMA_AE_QP_SUSPEND_COMPLETE: if (iwqp->iwdev->vsi.tc_change_pending) { atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs); wake_up(&iwqp->iwdev->suspend_wq); } break; case IRDMA_AE_TERMINATE_SENT: irdma_terminate_send_fin(qp); break; case IRDMA_AE_LLP_TERMINATE_RECEIVED: irdma_terminate_received(qp, info); break; case IRDMA_AE_CQ_OPERATION_ERROR: ibdev_err(&iwdev->ibdev, "Processing an iWARP related AE for CQ misc = 0x%04X\n", info->ae_id); spin_lock_irqsave(&rf->cqtable_lock, flags); iwcq = rf->cq_table[info->qp_cq_id]; if (!iwcq) { spin_unlock_irqrestore(&rf->cqtable_lock, flags); ibdev_dbg(to_ibdev(dev), "cq_id %d is already freed\n", info->qp_cq_id); continue; } irdma_cq_add_ref(&iwcq->ibcq); spin_unlock_irqrestore(&rf->cqtable_lock, flags); if (iwcq->ibcq.event_handler) { struct ib_event ibevent; ibevent.device = iwcq->ibcq.device; ibevent.event = IB_EVENT_CQ_ERR; ibevent.element.cq = &iwcq->ibcq; iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context); } irdma_cq_rem_ref(&iwcq->ibcq); break; case IRDMA_AE_RESET_NOT_SENT: case IRDMA_AE_LLP_DOUBT_REACHABILITY: case IRDMA_AE_RESOURCE_EXHAUSTION: break; case IRDMA_AE_PRIV_OPERATION_DENIED: case IRDMA_AE_STAG_ZERO_INVALID: case IRDMA_AE_IB_RREQ_AND_Q1_FULL: case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: case IRDMA_AE_DDP_UBE_INVALID_MO: case IRDMA_AE_DDP_UBE_INVALID_QN: case IRDMA_AE_DDP_NO_L_BIT: case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE: case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST: case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: case IRDMA_AE_INVALID_ARP_ENTRY: case IRDMA_AE_INVALID_TCP_OPTION_RCVD: case IRDMA_AE_STALE_ARP_ENTRY: case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: case IRDMA_AE_LLP_SYN_RECEIVED: case IRDMA_AE_LLP_TOO_MANY_RETRIES: case IRDMA_AE_LCE_QP_CATASTROPHIC: case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC: case IRDMA_AE_LCE_CQ_CATASTROPHIC: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: default: ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n", info->ae_id, info->qp, info->qp_cq_id, info->ae_src); if (rdma_protocol_roce(&iwdev->ibdev, 1)) { ctx_info->roce_info->err_rq_idx_valid = info->rq; if (info->rq) { ctx_info->roce_info->err_rq_idx = info->wqe_idx; irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); } irdma_set_flush_fields(qp, info); irdma_cm_disconn(iwqp); break; } ctx_info->iwarp_info->err_rq_idx_valid = info->rq; if (info->rq) { ctx_info->iwarp_info->err_rq_idx = info->wqe_idx; ctx_info->tcp_info_valid = false; ctx_info->iwarp_info_valid = true; irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); } if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS && iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) { irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0); irdma_cm_disconn(iwqp); } else { 
irdma_terminate_connection(qp, info); } break; } if (info->qp) irdma_qp_rem_ref(&iwqp->ibqp); } while (1); if (aeqcnt) irdma_sc_repost_aeq_entries(dev, aeqcnt); } /** * irdma_ena_intr - set up device interrupts * @dev: hardware control device structure * @msix_id: id of the interrupt to be enabled */ static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id) { dev->irq_ops->irdma_en_irq(dev, msix_id); } /** * irdma_dpc - tasklet for aeq and ceq 0 * @t: tasklet_struct ptr */ static void irdma_dpc(struct tasklet_struct *t) { struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet); if (rf->msix_shared) irdma_process_ceq(rf, rf->ceqlist); irdma_process_aeq(rf); irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx); } /** * irdma_ceq_dpc - dpc handler for CEQ * @t: tasklet_struct ptr */ static void irdma_ceq_dpc(struct tasklet_struct *t) { struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet); struct irdma_pci_f *rf = iwceq->rf; irdma_process_ceq(rf, iwceq); irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx); } /** * irdma_save_msix_info - copy msix vector information to iwarp device * @rf: RDMA PCI function * * Allocate iwdev msix table and copy the msix info to the table * Return 0 if successful, otherwise return error */ static int irdma_save_msix_info(struct irdma_pci_f *rf) { struct irdma_qvlist_info *iw_qvlist; struct irdma_qv_info *iw_qvinfo; struct msix_entry *pmsix; u32 ceq_idx; u32 i; size_t size; if (!rf->msix_count) return -EINVAL; size = sizeof(struct irdma_msix_vector) * rf->msix_count; size += struct_size(iw_qvlist, qv_info, rf->msix_count); rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); if (!rf->iw_msixtbl) return -ENOMEM; rf->iw_qvlist = (struct irdma_qvlist_info *) (&rf->iw_msixtbl[rf->msix_count]); iw_qvlist = rf->iw_qvlist; iw_qvinfo = iw_qvlist->qv_info; iw_qvlist->num_vectors = rf->msix_count; if (rf->msix_count <= num_online_cpus()) rf->msix_shared = true; else if (rf->msix_count > num_online_cpus() + 1) rf->msix_count = num_online_cpus() + 1; pmsix = rf->msix_entries; for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { rf->iw_msixtbl[i].idx = pmsix->entry; rf->iw_msixtbl[i].irq = pmsix->vector; rf->iw_msixtbl[i].cpu_affinity = ceq_idx; if (!i) { iw_qvinfo->aeq_idx = 0; if (rf->msix_shared) iw_qvinfo->ceq_idx = ceq_idx++; else iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX; } else { iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX; iw_qvinfo->ceq_idx = ceq_idx++; } iw_qvinfo->itr_idx = 3; iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx; pmsix++; } return 0; } /** * irdma_irq_handler - interrupt handler for aeq and ceq0 * @irq: Interrupt request number * @data: RDMA PCI function */ static irqreturn_t irdma_irq_handler(int irq, void *data) { struct irdma_pci_f *rf = data; tasklet_schedule(&rf->dpc_tasklet); return IRQ_HANDLED; } /** * irdma_ceq_handler - interrupt handler for ceq * @irq: interrupt request number * @data: ceq pointer */ static irqreturn_t irdma_ceq_handler(int irq, void *data) { struct irdma_ceq *iwceq = data; if (iwceq->irq != irq) ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n", iwceq->irq, irq); tasklet_schedule(&iwceq->dpc_tasklet); return IRQ_HANDLED; } /** * irdma_destroy_irq - destroy device interrupts * @rf: RDMA PCI function * @msix_vec: msix vector to disable irq * @dev_id: parameter to pass to free_irq (used during irq setup) * * The function is called when destroying aeq/ceq */ static void irdma_destroy_irq(struct irdma_pci_f *rf, struct irdma_msix_vector *msix_vec, void *dev_id) { struct irdma_sc_dev *dev = 
&rf->sc_dev; dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx); irq_update_affinity_hint(msix_vec->irq, NULL); free_irq(msix_vec->irq, dev_id); } /** * irdma_destroy_cqp - destroy control qp * @rf: RDMA PCI function * * Issue destroy cqp request and * free the resources associated with the cqp */ static void irdma_destroy_cqp(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_cqp *cqp = &rf->cqp; int status = 0; if (rf->cqp_cmpl_wq) destroy_workqueue(rf->cqp_cmpl_wq); status = irdma_sc_cqp_destroy(dev->cqp); if (status) ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status); irdma_cleanup_pending_cqp_op(rf); dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va, cqp->sq.pa); cqp->sq.va = NULL; kfree(cqp->scratch_array); cqp->scratch_array = NULL; kfree(cqp->cqp_requests); cqp->cqp_requests = NULL; } static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf) { struct irdma_aeq *aeq = &rf->aeq; u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr; irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt); irdma_free_pble(rf->pble_rsrc, &aeq->palloc); vfree(aeq->mem.va); } /** * irdma_destroy_aeq - destroy aeq * @rf: RDMA PCI function * * Issue a destroy aeq request and * free the resources associated with the aeq * The function is called during driver unload */ static void irdma_destroy_aeq(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_aeq *aeq = &rf->aeq; int status = -EBUSY; if (!rf->msix_shared) { rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false); irdma_destroy_irq(rf, rf->iw_msixtbl, rf); } if (rf->reset) goto exit; aeq->sc_aeq.size = 0; status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY); if (status) ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status); exit: if (aeq->virtual_map) { irdma_destroy_virt_aeq(rf); } else { dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va, aeq->mem.pa); aeq->mem.va = NULL; } } /** * irdma_destroy_ceq - destroy ceq * @rf: RDMA PCI function * @iwceq: ceq to be destroyed * * Issue a destroy ceq request and * free the resources associated with the ceq */ static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq) { struct irdma_sc_dev *dev = &rf->sc_dev; int status; if (rf->reset) goto exit; status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1); if (status) { ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status); goto exit; } status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq); if (status) ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n", status); exit: dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va, iwceq->mem.pa); iwceq->mem.va = NULL; } /** * irdma_del_ceq_0 - destroy ceq 0 * @rf: RDMA PCI function * * Disable the ceq 0 interrupt and destroy the ceq 0 */ static void irdma_del_ceq_0(struct irdma_pci_f *rf) { struct irdma_ceq *iwceq = rf->ceqlist; struct irdma_msix_vector *msix_vec; if (rf->msix_shared) { msix_vec = &rf->iw_msixtbl[0]; rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id, msix_vec->idx, false); irdma_destroy_irq(rf, msix_vec, rf); } else { msix_vec = &rf->iw_msixtbl[1]; irdma_destroy_irq(rf, msix_vec, iwceq); } irdma_destroy_ceq(rf, iwceq); rf->sc_dev.ceq_valid = false; rf->ceqs_count = 0; } /** * irdma_del_ceqs - destroy all ceq's except CEQ 0 * @rf: RDMA PCI function * * Go through all of the device ceq's, except 0, and for each * ceq disable the ceq interrupt and destroy 
the ceq */ static void irdma_del_ceqs(struct irdma_pci_f *rf) { struct irdma_ceq *iwceq = &rf->ceqlist[1]; struct irdma_msix_vector *msix_vec; u32 i = 0; if (rf->msix_shared) msix_vec = &rf->iw_msixtbl[1]; else msix_vec = &rf->iw_msixtbl[2]; for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) { rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id, msix_vec->idx, false); irdma_destroy_irq(rf, msix_vec, iwceq); irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, IRDMA_OP_CEQ_DESTROY); dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size, iwceq->mem.va, iwceq->mem.pa); iwceq->mem.va = NULL; } rf->ceqs_count = 1; } /** * irdma_destroy_ccq - destroy control cq * @rf: RDMA PCI function * * Issue destroy ccq request and * free the resources associated with the ccq */ static void irdma_destroy_ccq(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_ccq *ccq = &rf->ccq; int status = 0; if (!rf->reset) status = irdma_sc_ccq_destroy(dev->ccq, 0, true); if (status) ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status); dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va, ccq->mem_cq.pa); ccq->mem_cq.va = NULL; } /** * irdma_close_hmc_objects_type - delete hmc objects of a given type * @dev: iwarp device * @obj_type: the hmc object type to be deleted * @hmc_info: host memory info struct * @privileged: permission to close HMC objects * @reset: true if called before reset */ static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev, enum irdma_hmc_rsrc_type obj_type, struct irdma_hmc_info *hmc_info, bool privileged, bool reset) { struct irdma_hmc_del_obj_info info = {}; info.hmc_info = hmc_info; info.rsrc_type = obj_type; info.count = hmc_info->hmc_obj[obj_type].cnt; info.privileged = privileged; if (irdma_sc_del_hmc_obj(dev, &info, reset)) ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n", obj_type); } /** * irdma_del_hmc_objects - remove all device hmc objects * @dev: iwarp device * @hmc_info: hmc_info to free * @privileged: permission to delete HMC objects * @reset: true if called before reset * @vers: hardware version */ static void irdma_del_hmc_objects(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info, bool privileged, bool reset, enum irdma_vers vers) { unsigned int i; for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, privileged, reset); if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER) break; } } /** * irdma_create_hmc_obj_type - create hmc object of a given type * @dev: hardware control device structure * @info: information for the hmc object to create */ static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev, struct irdma_hmc_create_obj_info *info) { return irdma_sc_create_hmc_obj(dev, info); } /** * irdma_create_hmc_objs - create all hmc objects for the device * @rf: RDMA PCI function * @privileged: permission to create HMC objects * @vers: HW version * * Create the device hmc objects and allocate hmc pages * Return 0 if successful, otherwise clean up and return error */ static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, enum irdma_vers vers) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_hmc_create_obj_info info = {}; int i, status = 0; info.hmc_info = dev->hmc_info; info.privileged = privileged; info.entry_type = rf->sd_type; for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE) continue; if 
(dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) { info.rsrc_type = iw_hmc_obj_types[i]; info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt; info.add_sd_cnt = 0; status = irdma_create_hmc_obj_type(dev, &info); if (status) { ibdev_dbg(to_ibdev(dev), "ERR: create obj type %d status = %d\n", iw_hmc_obj_types[i], status); break; } } if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER) break; } if (!status) return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id, true, true); while (i) { i--; /* destroy the hmc objects of a given type */ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i], dev->hmc_info, privileged, false); } return status; } /** * irdma_obj_aligned_mem - get aligned memory from device allocated memory * @rf: RDMA PCI function * @memptr: points to the memory addresses * @size: size of memory needed * @mask: mask for the aligned memory * * Get aligned memory of the requested size and * update the memptr to point to the new aligned memory * Return 0 if successful, otherwise return no memory error */ static int irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr, u32 size, u32 mask) { unsigned long va, newva; unsigned long extra; va = (unsigned long)rf->obj_next.va; newva = va; if (mask) newva = ALIGN(va, (unsigned long)mask + 1ULL); extra = newva - va; memptr->va = (u8 *)va + extra; memptr->pa = rf->obj_next.pa + extra; memptr->size = size; if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size)) return -ENOMEM; rf->obj_next.va = (u8 *)memptr->va + size; rf->obj_next.pa = memptr->pa + size; return 0; } /** * irdma_create_cqp - create control qp * @rf: RDMA PCI function * * Return 0, if the cqp and all the resources associated with it * are successfully created, otherwise return error */ static int irdma_create_cqp(struct irdma_pci_f *rf) { u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048; struct irdma_dma_mem mem; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_cqp_init_info cqp_init_info = {}; struct irdma_cqp *cqp = &rf->cqp; u16 maj_err, min_err; int i, status; cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL); if (!cqp->cqp_requests) return -ENOMEM; cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL); if (!cqp->scratch_array) { status = -ENOMEM; goto err_scratch; } dev->cqp = &cqp->sc_cqp; dev->cqp->dev = dev; cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize, IRDMA_CQP_ALIGNMENT); cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size, &cqp->sq.pa, GFP_KERNEL); if (!cqp->sq.va) { status = -ENOMEM; goto err_sq; } status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx), IRDMA_HOST_CTX_ALIGNMENT_M); if (status) goto err_ctx; dev->cqp->host_ctx_pa = mem.pa; dev->cqp->host_ctx = mem.va; /* populate the cqp init info */ cqp_init_info.dev = dev; cqp_init_info.sq_size = sqsize; cqp_init_info.sq = cqp->sq.va; cqp_init_info.sq_pa = cqp->sq.pa; cqp_init_info.host_ctx_pa = mem.pa; cqp_init_info.host_ctx = mem.va; cqp_init_info.hmc_profile = rf->rsrc_profile; cqp_init_info.scratch_array = cqp->scratch_array; cqp_init_info.protocol_used = rf->protocol_used; switch (rf->rdma_ver) { case IRDMA_GEN_1: cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1; break; case IRDMA_GEN_2: cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2; break; } status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info); if (status) { ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status); goto err_ctx; } 
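/* The SW side of the CQP is initialized; now create the CQP in HW and
 * build the free list of cqp_request entries that callers pull from.
 */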
spin_lock_init(&cqp->req_lock); spin_lock_init(&cqp->compl_lock); status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err); if (status) { ibdev_dbg(to_ibdev(dev), "ERR: cqp create failed - status %d maj_err %d min_err %d\n", status, maj_err, min_err); goto err_ctx; } INIT_LIST_HEAD(&cqp->cqp_avail_reqs); INIT_LIST_HEAD(&cqp->cqp_pending_reqs); /* init the waitqueue of the cqp_requests and add them to the list */ for (i = 0; i < sqsize; i++) { init_waitqueue_head(&cqp->cqp_requests[i].waitq); list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs); } init_waitqueue_head(&cqp->remove_wq); return 0; err_ctx: dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va, cqp->sq.pa); cqp->sq.va = NULL; err_sq: kfree(cqp->scratch_array); cqp->scratch_array = NULL; err_scratch: kfree(cqp->cqp_requests); cqp->cqp_requests = NULL; return status; } /** * irdma_create_ccq - create control cq * @rf: RDMA PCI function * * Return 0, if the ccq and the resources associated with it * are successfully created, otherwise return error */ static int irdma_create_ccq(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_ccq_init_info info = {}; struct irdma_ccq *ccq = &rf->ccq; int status; dev->ccq = &ccq->sc_cq; dev->ccq->dev = dev; info.dev = dev; ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area); ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE, IRDMA_CQ0_ALIGNMENT); ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size, &ccq->mem_cq.pa, GFP_KERNEL); if (!ccq->mem_cq.va) return -ENOMEM; status = irdma_obj_aligned_mem(rf, &ccq->shadow_area, ccq->shadow_area.size, IRDMA_SHADOWAREA_M); if (status) goto exit; ccq->sc_cq.back_cq = ccq; /* populate the ccq init info */ info.cq_base = ccq->mem_cq.va; info.cq_pa = ccq->mem_cq.pa; info.num_elem = IW_CCQ_SIZE; info.shadow_area = ccq->shadow_area.va; info.shadow_area_pa = ccq->shadow_area.pa; info.ceqe_mask = false; info.ceq_id_valid = true; info.shadow_read_threshold = 16; info.vsi = &rf->default_vsi; status = irdma_sc_ccq_init(dev->ccq, &info); if (!status) status = irdma_sc_ccq_create(dev->ccq, 0, true, true); exit: if (status) { dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va, ccq->mem_cq.pa); ccq->mem_cq.va = NULL; } return status; } /** * irdma_alloc_set_mac - set up a mac address table entry * @iwdev: irdma device * * Allocate a mac ip entry and add it to the hw table Return 0 * if successful, otherwise return error */ static int irdma_alloc_set_mac(struct irdma_device *iwdev) { int status; status = irdma_alloc_local_mac_entry(iwdev->rf, &iwdev->mac_ip_table_idx); if (!status) { status = irdma_add_local_mac_entry(iwdev->rf, (const u8 *)iwdev->netdev->dev_addr, (u8)iwdev->mac_ip_table_idx); if (status) irdma_del_local_mac_entry(iwdev->rf, (u8)iwdev->mac_ip_table_idx); } return status; } /** * irdma_cfg_ceq_vector - set up the msix interrupt vector for * ceq * @rf: RDMA PCI function * @iwceq: ceq associated with the vector * @ceq_id: the id number of the iwceq * @msix_vec: interrupt vector information * * Allocate interrupt resources and enable irq handling * Return 0 if successful, otherwise return error */ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, u32 ceq_id, struct irdma_msix_vector *msix_vec) { int status; if (rf->msix_shared && !ceq_id) { snprintf(msix_vec->name, sizeof(msix_vec->name) - 1, "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev)); tasklet_setup(&rf->dpc_tasklet, irdma_dpc); status = request_irq(msix_vec->irq, 
irdma_irq_handler, 0, msix_vec->name, rf); } else { snprintf(msix_vec->name, sizeof(msix_vec->name) - 1, "irdma-%s-CEQ-%d", dev_name(&rf->pcidev->dev), ceq_id); tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc); status = request_irq(msix_vec->irq, irdma_ceq_handler, 0, msix_vec->name, iwceq); } cpumask_clear(&msix_vec->mask); cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask); if (status) { ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n"); return status; } msix_vec->ceq_id = ceq_id; rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true); return 0; } /** * irdma_cfg_aeq_vector - set up the msix vector for aeq * @rf: RDMA PCI function * * Allocate interrupt resources and enable irq handling * Return 0 if successful, otherwise return error */ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf) { struct irdma_msix_vector *msix_vec = rf->iw_msixtbl; u32 ret = 0; if (!rf->msix_shared) { snprintf(msix_vec->name, sizeof(msix_vec->name) - 1, "irdma-%s-AEQ", dev_name(&rf->pcidev->dev)); tasklet_setup(&rf->dpc_tasklet, irdma_dpc); ret = request_irq(msix_vec->irq, irdma_irq_handler, 0, msix_vec->name, rf); } if (ret) { ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n"); return -EINVAL; } rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true); return 0; } /** * irdma_create_ceq - create completion event queue * @rf: RDMA PCI function * @iwceq: pointer to the ceq resources to be created * @ceq_id: the id number of the iwceq * @vsi: SC vsi struct * * Return 0, if the ceq and the resources associated with it * are successfully created, otherwise return error */ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, u32 ceq_id, struct irdma_sc_vsi *vsi) { int status; struct irdma_ceq_init_info info = {}; struct irdma_sc_dev *dev = &rf->sc_dev; u64 scratch; u32 ceq_size; info.ceq_id = ceq_id; iwceq->rf = rf; ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, dev->hw_attrs.max_hw_ceq_size); iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size, IRDMA_CEQ_ALIGNMENT); iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size, &iwceq->mem.pa, GFP_KERNEL); if (!iwceq->mem.va) return -ENOMEM; info.ceq_id = ceq_id; info.ceqe_base = iwceq->mem.va; info.ceqe_pa = iwceq->mem.pa; info.elem_cnt = ceq_size; iwceq->sc_ceq.ceq_id = ceq_id; info.dev = dev; info.vsi = vsi; scratch = (uintptr_t)&rf->cqp.sc_cqp; status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info); if (!status) { if (dev->ceq_valid) status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, IRDMA_OP_CEQ_CREATE); else status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch); } if (status) { dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va, iwceq->mem.pa); iwceq->mem.va = NULL; } return status; } /** * irdma_setup_ceq_0 - create CEQ 0 and it's interrupt resource * @rf: RDMA PCI function * * Allocate a list for all device completion event queues * Create the ceq 0 and configure it's msix interrupt vector * Return 0, if successfully set up, otherwise return error */ static int irdma_setup_ceq_0(struct irdma_pci_f *rf) { struct irdma_ceq *iwceq; struct irdma_msix_vector *msix_vec; u32 i; int status = 0; u32 num_ceqs; num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); if (!rf->ceqlist) { status = -ENOMEM; goto exit; } iwceq = &rf->ceqlist[0]; status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi); if 
(status) { ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n", status); goto exit; } spin_lock_init(&iwceq->ce_lock); i = rf->msix_shared ? 0 : 1; msix_vec = &rf->iw_msixtbl[i]; iwceq->irq = msix_vec->irq; iwceq->msix_idx = msix_vec->idx; status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec); if (status) { irdma_destroy_ceq(rf, iwceq); goto exit; } irdma_ena_intr(&rf->sc_dev, msix_vec->idx); rf->ceqs_count++; exit: if (status && !rf->ceqs_count) { kfree(rf->ceqlist); rf->ceqlist = NULL; return status; } rf->sc_dev.ceq_valid = true; return 0; } /** * irdma_setup_ceqs - manage the device ceq's and their interrupt resources * @rf: RDMA PCI function * @vsi: VSI structure for this CEQ * * Allocate a list for all device completion event queues * Create the ceq's and configure their msix interrupt vectors * Return 0, if ceqs are successfully set up, otherwise return error */ static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi) { u32 i; u32 ceq_id; struct irdma_ceq *iwceq; struct irdma_msix_vector *msix_vec; int status; u32 num_ceqs; num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); i = (rf->msix_shared) ? 1 : 2; for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) { iwceq = &rf->ceqlist[ceq_id]; status = irdma_create_ceq(rf, iwceq, ceq_id, vsi); if (status) { ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n", status); goto del_ceqs; } spin_lock_init(&iwceq->ce_lock); msix_vec = &rf->iw_msixtbl[i]; iwceq->irq = msix_vec->irq; iwceq->msix_idx = msix_vec->idx; status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); if (status) { irdma_destroy_ceq(rf, iwceq); goto del_ceqs; } irdma_ena_intr(&rf->sc_dev, msix_vec->idx); rf->ceqs_count++; } return 0; del_ceqs: irdma_del_ceqs(rf); return status; } static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size) { struct irdma_aeq *aeq = &rf->aeq; dma_addr_t *pg_arr; u32 pg_cnt; int status; if (rf->rdma_ver < IRDMA_GEN_2) return -EOPNOTSUPP; aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size; aeq->mem.va = vzalloc(aeq->mem.size); if (!aeq->mem.va) return -ENOMEM; pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); if (status) { vfree(aeq->mem.va); return status; } pg_arr = (dma_addr_t *)aeq->palloc.level1.addr; status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt); if (status) { irdma_free_pble(rf->pble_rsrc, &aeq->palloc); vfree(aeq->mem.va); return status; } return 0; } /** * irdma_create_aeq - create async event queue * @rf: RDMA PCI function * * Return 0, if the aeq and the resources associated with it * are successfully created, otherwise return error */ static int irdma_create_aeq(struct irdma_pci_f *rf) { struct irdma_aeq_init_info info = {}; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_aeq *aeq = &rf->aeq; struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info; u32 aeq_size; u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1; int status; aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt + hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size); aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size, IRDMA_AEQ_ALIGNMENT); aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size, &aeq->mem.pa, GFP_KERNEL | __GFP_NOWARN); if (aeq->mem.va) goto skip_virt_aeq; /* physically mapped aeq failed. 
setup virtual aeq */ status = irdma_create_virt_aeq(rf, aeq_size); if (status) return status; info.virtual_map = true; aeq->virtual_map = info.virtual_map; info.pbl_chunk_size = 1; info.first_pm_pbl_idx = aeq->palloc.level1.idx; skip_virt_aeq: info.aeqe_base = aeq->mem.va; info.aeq_elem_pa = aeq->mem.pa; info.elem_cnt = aeq_size; info.dev = dev; info.msix_idx = rf->iw_msixtbl->idx; status = irdma_sc_aeq_init(&aeq->sc_aeq, &info); if (status) goto err; status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE); if (status) goto err; return 0; err: if (aeq->virtual_map) { irdma_destroy_virt_aeq(rf); } else { dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va, aeq->mem.pa); aeq->mem.va = NULL; } return status; } /** * irdma_setup_aeq - set up the device aeq * @rf: RDMA PCI function * * Create the aeq and configure its msix interrupt vector * Return 0 if successful, otherwise return error */ static int irdma_setup_aeq(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; int status; status = irdma_create_aeq(rf); if (status) return status; status = irdma_cfg_aeq_vector(rf); if (status) { irdma_destroy_aeq(rf); return status; } if (!rf->msix_shared) irdma_ena_intr(dev, rf->iw_msixtbl[0].idx); return 0; } /** * irdma_initialize_ilq - create iwarp local queue for cm * @iwdev: irdma device * * Return 0 if successful, otherwise return error */ static int irdma_initialize_ilq(struct irdma_device *iwdev) { struct irdma_puda_rsrc_info info = {}; int status; info.type = IRDMA_PUDA_RSRC_TYPE_ILQ; info.cq_id = 1; info.qp_id = 1; info.count = 1; info.pd_id = 1; info.abi_ver = IRDMA_ABI_VER; info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); info.rq_size = info.sq_size; info.buf_size = 1024; info.tx_buf_cnt = 2 * info.sq_size; info.receive = irdma_receive_ilq; info.xmit_complete = irdma_free_sqbuf; status = irdma_puda_create_rsrc(&iwdev->vsi, &info); if (status) ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n"); return status; } /** * irdma_initialize_ieq - create iwarp exception queue * @iwdev: irdma device * * Return 0 if successful, otherwise return error */ static int irdma_initialize_ieq(struct irdma_device *iwdev) { struct irdma_puda_rsrc_info info = {}; int status; info.type = IRDMA_PUDA_RSRC_TYPE_IEQ; info.cq_id = 2; info.qp_id = iwdev->vsi.exception_lan_q; info.count = 1; info.pd_id = 2; info.abi_ver = IRDMA_ABI_VER; info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); info.rq_size = info.sq_size; info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD; info.tx_buf_cnt = 4096; status = irdma_puda_create_rsrc(&iwdev->vsi, &info); if (status) ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n"); return status; } /** * irdma_reinitialize_ieq - destroy and re-create ieq * @vsi: VSI structure */ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi) { struct irdma_device *iwdev = vsi->back_vsi; struct irdma_pci_f *rf = iwdev->rf; irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false); if (irdma_initialize_ieq(iwdev)) { iwdev->rf->reset = true; rf->gen_ops.request_reset(rf); } } /** * irdma_hmc_setup - create hmc objects for the device * @rf: RDMA PCI function * * Set up the device private memory space for the number and size of * the hmc objects and create the objects * Return 0 if successful, otherwise return error */ static int irdma_hmc_setup(struct irdma_pci_f *rf) { int status; u32 qpcnt; qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; rf->sd_type = IRDMA_SD_TYPE_DIRECT; status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt); if (status) return status; status = 
irdma_create_hmc_objs(rf, true, rf->rdma_ver); return status; } /** * irdma_del_init_mem - deallocate memory resources * @rf: RDMA PCI function */ static void irdma_del_init_mem(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; kfree(dev->hmc_info->sd_table.sd_entry); dev->hmc_info->sd_table.sd_entry = NULL; vfree(rf->mem_rsrc); rf->mem_rsrc = NULL; dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, rf->obj_mem.pa); rf->obj_mem.va = NULL; if (rf->rdma_ver != IRDMA_GEN_1) { bitmap_free(rf->allocated_ws_nodes); rf->allocated_ws_nodes = NULL; } kfree(rf->ceqlist); rf->ceqlist = NULL; kfree(rf->iw_msixtbl); rf->iw_msixtbl = NULL; kfree(rf->hmc_info_mem); rf->hmc_info_mem = NULL; } /** * irdma_initialize_dev - initialize device * @rf: RDMA PCI function * * Allocate memory for the hmc objects and initialize iwdev * Return 0 if successful, otherwise clean up the resources * and return error */ static int irdma_initialize_dev(struct irdma_pci_f *rf) { int status; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_device_init_info info = {}; struct irdma_dma_mem mem; u32 size; size = sizeof(struct irdma_hmc_pble_rsrc) + sizeof(struct irdma_hmc_info) + (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX); rf->hmc_info_mem = kzalloc(size, GFP_KERNEL); if (!rf->hmc_info_mem) return -ENOMEM; rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem; dev->hmc_info = &rf->hw.hmc; dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *) (rf->pble_rsrc + 1); status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE, IRDMA_FPM_QUERY_BUF_ALIGNMENT_M); if (status) goto error; info.fpm_query_buf_pa = mem.pa; info.fpm_query_buf = mem.va; status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE, IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M); if (status) goto error; info.fpm_commit_buf_pa = mem.pa; info.fpm_commit_buf = mem.va; info.bar0 = rf->hw.hw_addr; info.hmc_fn_id = rf->pf_id; info.hw = &rf->hw; status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); if (status) goto error; return status; error: kfree(rf->hmc_info_mem); rf->hmc_info_mem = NULL; return status; } /** * irdma_rt_deinit_hw - clean up the irdma device resources * @iwdev: irdma device * * remove the mac ip entry and ipv4/ipv6 addresses, destroy the * device queues and free the pble and the hmc objects */ void irdma_rt_deinit_hw(struct irdma_device *iwdev) { ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state); switch (iwdev->init_state) { case IP_ADDR_REGISTERED: if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) irdma_del_local_mac_entry(iwdev->rf, (u8)iwdev->mac_ip_table_idx); fallthrough; case AEQ_CREATED: case PBLE_CHUNK_MEM: case CEQS_CREATED: case IEQ_CREATED: if (!iwdev->roce_mode) irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, iwdev->rf->reset); fallthrough; case ILQ_CREATED: if (!iwdev->roce_mode) irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_ILQ, iwdev->rf->reset); break; default: ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state); break; } irdma_cleanup_cm_core(&iwdev->cm_core); if (iwdev->vsi.pestat) { irdma_vsi_stats_free(&iwdev->vsi); kfree(iwdev->vsi.pestat); } if (iwdev->cleanup_wq) destroy_workqueue(iwdev->cleanup_wq); } static int irdma_setup_init_state(struct irdma_pci_f *rf) { int status; status = irdma_save_msix_info(rf); if (status) return status; rf->hw.device = &rf->pcidev->dev; rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE); rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size, 
&rf->obj_mem.pa, GFP_KERNEL); if (!rf->obj_mem.va) { status = -ENOMEM; goto clean_msixtbl; } rf->obj_next = rf->obj_mem; status = irdma_initialize_dev(rf); if (status) goto clean_obj_mem; return 0; clean_obj_mem: dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, rf->obj_mem.pa); rf->obj_mem.va = NULL; clean_msixtbl: kfree(rf->iw_msixtbl); rf->iw_msixtbl = NULL; return status; } /** * irdma_get_used_rsrc - determine resources used internally * @iwdev: irdma device * * Called at the end of open to get all internal allocations */ static void irdma_get_used_rsrc(struct irdma_device *iwdev) { iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds, iwdev->rf->max_pd); iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps, iwdev->rf->max_qp); iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs, iwdev->rf->max_cq); iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs, iwdev->rf->max_mr); } void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) { enum init_completion_state state = rf->init_state; rf->init_state = INVALID_STATE; if (rf->rsrc_created) { irdma_destroy_aeq(rf); irdma_destroy_pble_prm(rf->pble_rsrc); irdma_del_ceqs(rf); rf->rsrc_created = false; } switch (state) { case CEQ0_CREATED: irdma_del_ceq_0(rf); fallthrough; case CCQ_CREATED: irdma_destroy_ccq(rf); fallthrough; case HW_RSRC_INITIALIZED: case HMC_OBJS_CREATED: irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true, rf->reset, rf->rdma_ver); fallthrough; case CQP_CREATED: irdma_destroy_cqp(rf); fallthrough; case INITIAL_STATE: irdma_del_init_mem(rf); break; case INVALID_STATE: default: ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state); break; } } /** * irdma_rt_init_hw - Initializes runtime portion of HW * @iwdev: irdma device * @l2params: qos, tc, mtu info from netdev driver * * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma * device resource objects. 
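* Return 0 if successful, otherwise return error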
*/ int irdma_rt_init_hw(struct irdma_device *iwdev, struct irdma_l2params *l2params) { struct irdma_pci_f *rf = iwdev->rf; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_vsi_init_info vsi_info = {}; struct irdma_vsi_stats_info stats_info = {}; int status; vsi_info.dev = dev; vsi_info.back_vsi = iwdev; vsi_info.params = l2params; vsi_info.pf_data_vsi_num = iwdev->vsi_num; vsi_info.register_qset = rf->gen_ops.register_qset; vsi_info.unregister_qset = rf->gen_ops.unregister_qset; vsi_info.exception_lan_q = 2; irdma_sc_vsi_init(&iwdev->vsi, &vsi_info); status = irdma_setup_cm_core(iwdev, rf->rdma_ver); if (status) return status; stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); if (!stats_info.pestat) { irdma_cleanup_cm_core(&iwdev->cm_core); return -ENOMEM; } stats_info.fcn_id = dev->hmc_fn_id; status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info); if (status) { irdma_cleanup_cm_core(&iwdev->cm_core); kfree(stats_info.pestat); return status; } do { if (!iwdev->roce_mode) { status = irdma_initialize_ilq(iwdev); if (status) break; iwdev->init_state = ILQ_CREATED; status = irdma_initialize_ieq(iwdev); if (status) break; iwdev->init_state = IEQ_CREATED; } if (!rf->rsrc_created) { status = irdma_setup_ceqs(rf, &iwdev->vsi); if (status) break; iwdev->init_state = CEQS_CREATED; status = irdma_hmc_init_pble(&rf->sc_dev, rf->pble_rsrc); if (status) { irdma_del_ceqs(rf); break; } iwdev->init_state = PBLE_CHUNK_MEM; status = irdma_setup_aeq(rf); if (status) { irdma_destroy_pble_prm(rf->pble_rsrc); irdma_del_ceqs(rf); break; } iwdev->init_state = AEQ_CREATED; rf->rsrc_created = true; } if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) irdma_alloc_set_mac(iwdev); irdma_add_ip(iwdev); iwdev->init_state = IP_ADDR_REGISTERED; /* handles asynch cleanup tasks - disconnect CM , free qp, * free cq bufs */ iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); if (!iwdev->cleanup_wq) return -ENOMEM; irdma_get_used_rsrc(iwdev); init_waitqueue_head(&iwdev->suspend_wq); return 0; } while (0); dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n", status, iwdev->init_state); irdma_rt_deinit_hw(iwdev); return status; } /** * irdma_ctrl_init_hw - Initializes control portion of HW * @rf: RDMA PCI function * * Create admin queues, HMC obejcts and RF resource objects */ int irdma_ctrl_init_hw(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; int status; do { status = irdma_setup_init_state(rf); if (status) break; rf->init_state = INITIAL_STATE; status = irdma_create_cqp(rf); if (status) break; rf->init_state = CQP_CREATED; status = irdma_hmc_setup(rf); if (status) break; rf->init_state = HMC_OBJS_CREATED; status = irdma_initialize_hw_rsrc(rf); if (status) break; rf->init_state = HW_RSRC_INITIALIZED; status = irdma_create_ccq(rf); if (status) break; rf->init_state = CCQ_CREATED; dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT; if (rf->rdma_ver != IRDMA_GEN_1) { status = irdma_get_rdma_features(dev); if (status) break; } status = irdma_setup_ceq_0(rf); if (status) break; rf->init_state = CEQ0_CREATED; /* Handles processing of CQP completions */ rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI); if (!rf->cqp_cmpl_wq) { status = -ENOMEM; break; } INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); irdma_sc_ccq_arm(dev->ccq); return 0; } while (0); dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n", rf->init_state, status); 
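/* Unwind partial initialization: irdma_ctrl_deinit_hw() keys off
 * rf->init_state to tear down only what was successfully created.
 */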
irdma_ctrl_deinit_hw(rf); return status; } /** * irdma_set_hw_rsrc - set hw memory resources. * @rf: RDMA PCI function */ static void irdma_set_hw_rsrc(struct irdma_pci_f *rf) { rf->allocated_qps = (void *)(rf->mem_rsrc + (sizeof(struct irdma_arp_entry) * rf->arp_table_size)); rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]; rf->qp_table = (struct irdma_qp **) (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]); rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]); spin_lock_init(&rf->rsrc_lock); spin_lock_init(&rf->arp_lock); spin_lock_init(&rf->qptable_lock); spin_lock_init(&rf->cqtable_lock); spin_lock_init(&rf->qh_list_lock); } /** * irdma_calc_mem_rsrc_size - calculate memory resources size. * @rf: RDMA PCI function */ static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf) { u32 rsrc_size; rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size; rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp); rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size); rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg); rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp; rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq; return rsrc_size; } /** * irdma_initialize_hw_rsrc - initialize hw resource tracking array * @rf: RDMA PCI function */ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) { u32 rsrc_size; u32 mrdrvbits; u32 ret; if (rf->rdma_ver != IRDMA_GEN_1) { rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES, GFP_KERNEL); if (!rf->allocated_ws_nodes) return -ENOMEM; set_bit(0, rf->allocated_ws_nodes); rf->max_ws_node_id = IRDMA_MAX_WS_NODES; } rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size; rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt; rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt; rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds; rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt; rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt; rf->max_mcg = rf->max_qp; rsrc_size = irdma_calc_mem_rsrc_size(rf); rf->mem_rsrc = vzalloc(rsrc_size); if (!rf->mem_rsrc) { ret = -ENOMEM; goto mem_rsrc_vzalloc_fail; } rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc; irdma_set_hw_rsrc(rf); set_bit(0, rf->allocated_mrs); set_bit(0, rf->allocated_qps); set_bit(0, rf->allocated_cqs); set_bit(0, rf->allocated_pds); set_bit(0, rf->allocated_arps); set_bit(0, rf->allocated_ahs); set_bit(0, rf->allocated_mcgs); set_bit(2, rf->allocated_qps); /* qp 2 IEQ */ set_bit(1, rf->allocated_qps); /* qp 1 ILQ */ set_bit(1, rf->allocated_cqs); set_bit(1, rf->allocated_pds); set_bit(2, rf->allocated_cqs); set_bit(2, rf->allocated_pds); INIT_LIST_HEAD(&rf->mc_qht_list.list); /* stag index mask has a minimum of 14 bits */ mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14); rf->mr_stagmask = ~(((1 << mrdrvbits) 
- 1) << (32 - mrdrvbits)); return 0; mem_rsrc_vzalloc_fail: bitmap_free(rf->allocated_ws_nodes); rf->allocated_ws_nodes = NULL; return ret; } /** * irdma_cqp_ce_handler - handle cqp completions * @rf: RDMA PCI function * @cq: cq for cqp completions */ void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) { struct irdma_cqp_request *cqp_request; struct irdma_sc_dev *dev = &rf->sc_dev; u32 cqe_count = 0; struct irdma_ccq_cqe_info info; unsigned long flags; int ret; do { memset(&info, 0, sizeof(info)); spin_lock_irqsave(&rf->cqp.compl_lock, flags); ret = irdma_sc_ccq_get_cqe_info(cq, &info); spin_unlock_irqrestore(&rf->cqp.compl_lock, flags); if (ret) break; cqp_request = (struct irdma_cqp_request *) (unsigned long)info.scratch; if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd, info.maj_err_code, info.min_err_code)) ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n", info.op_code, info.maj_err_code, info.min_err_code); if (cqp_request) { cqp_request->compl_info.maj_err_code = info.maj_err_code; cqp_request->compl_info.min_err_code = info.min_err_code; cqp_request->compl_info.op_ret_val = info.op_ret_val; cqp_request->compl_info.error = info.error; if (cqp_request->waiting) { WRITE_ONCE(cqp_request->request_done, true); wake_up(&cqp_request->waitq); irdma_put_cqp_request(&rf->cqp, cqp_request); } else { if (cqp_request->callback_fcn) cqp_request->callback_fcn(cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); } } cqe_count++; } while (1); if (cqe_count) { irdma_process_bh(dev); irdma_sc_ccq_arm(cq); } } /** * cqp_compl_worker - Handle cqp completions * @work: Pointer to work structure */ void cqp_compl_worker(struct work_struct *work) { struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f, cqp_cmpl_work); struct irdma_sc_cq *cq = &rf->ccq.sc_cq; irdma_cqp_ce_handler(rf, cq); } /** * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port * @cm_core: cm's core * @port: port to identify apbvt entry */ static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core, u16 port) { struct irdma_apbvt_entry *entry; hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) { if (entry->port == port) { entry->use_cnt++; return entry; } } return NULL; } /** * irdma_next_iw_state - modify qp state * @iwqp: iwarp qp to modify * @state: next state for qp * @del_hash: del hash * @term: term message * @termlen: length of term message */ void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term, u8 termlen) { struct irdma_modify_qp_info info = {}; info.next_iwarp_state = state; info.remove_hash_idx = del_hash; info.cq_num_valid = true; info.arp_cache_idx_valid = true; info.dont_send_term = true; info.dont_send_fin = true; info.termlen = termlen; if (term & IRDMAQP_TERM_SEND_TERM_ONLY) info.dont_send_term = false; if (term & IRDMAQP_TERM_SEND_FIN_ONLY) info.dont_send_fin = false; if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR) info.reset_tcp_conn = true; iwqp->hw_iwarp_state = state; irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0); iwqp->iwarp_state = info.next_iwarp_state; } /** * irdma_del_local_mac_entry - remove a mac entry from the hw * table * @rf: RDMA PCI function * @idx: the index of the mac ip address to delete */ void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx) { struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; cqp_request = 
irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) return; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY; cqp_info->post_sq = 1; cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp; cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request; cqp_info->in.u.del_local_mac_entry.entry_idx = idx; cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0; irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(iwcqp, cqp_request); } /** * irdma_add_local_mac_entry - add a mac ip address entry to the * hw table * @rf: RDMA PCI function * @mac_addr: pointer to mac address * @idx: the index of the mac ip address to add */ int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx) { struct irdma_local_mac_entry_info *info; struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->post_sq = 1; info = &cqp_info->in.u.add_local_mac_entry.info; ether_addr_copy(info->mac_addr, mac_addr); info->entry_idx = idx; cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request; cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY; cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp; cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(iwcqp, cqp_request); return status; } /** * irdma_alloc_local_mac_entry - allocate a mac entry * @rf: RDMA PCI function * @mac_tbl_idx: the index of the new mac address * * Allocate a mac address entry and update the mac_tbl_idx * to hold the index of the newly created mac address * Return 0 if successful, otherwise return error */ int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx) { struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status = 0; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY; cqp_info->post_sq = 1; cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp; cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); if (!status) *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val; irdma_put_cqp_request(iwcqp, cqp_request); return status; } /** * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt * @iwdev: irdma device * @accel_local_port: port for apbvt * @add_port: add ordelete port */ static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev, u16 accel_local_port, bool add_port) { struct irdma_apbvt_info *info; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; info = &cqp_info->in.u.manage_apbvt_entry.info; memset(info, 0, sizeof(*info)); info->add = add_port; info->port = accel_local_port; cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY; cqp_info->post_sq = 1; cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp; cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request; ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n", (!add_port) ? 
"DELETE" : "ADD", accel_local_port); status = irdma_handle_cqp_op(iwdev->rf, cqp_request); irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); return status; } /** * irdma_add_apbvt - add tcp port to HW apbvt table * @iwdev: irdma device * @port: port for apbvt */ struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port) { struct irdma_cm_core *cm_core = &iwdev->cm_core; struct irdma_apbvt_entry *entry; unsigned long flags; spin_lock_irqsave(&cm_core->apbvt_lock, flags); entry = irdma_lookup_apbvt_entry(cm_core, port); if (entry) { spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); return entry; } entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); return NULL; } entry->port = port; entry->use_cnt = 1; hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port); spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) { kfree(entry); return NULL; } return entry; } /** * irdma_del_apbvt - delete tcp port from HW apbvt table * @iwdev: irdma device * @entry: apbvt entry object */ void irdma_del_apbvt(struct irdma_device *iwdev, struct irdma_apbvt_entry *entry) { struct irdma_cm_core *cm_core = &iwdev->cm_core; unsigned long flags; spin_lock_irqsave(&cm_core->apbvt_lock, flags); if (--entry->use_cnt) { spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); return; } hash_del(&entry->hlist); /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to * protect against race where add APBVT CQP can race ahead of the delete * APBVT for same port. */ irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false); kfree(entry); spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); } /** * irdma_manage_arp_cache - manage hw arp cache * @rf: RDMA PCI function * @mac_addr: mac address ptr * @ip_addr: ip addr for arp cache * @ipv4: flag inicating IPv4 * @action: add, delete or modify */ void irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr, u32 *ip_addr, bool ipv4, u32 action) { struct irdma_add_arp_cache_entry_info *info; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int arp_index; arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action); if (arp_index == -1) return; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); if (!cqp_request) return; cqp_info = &cqp_request->info; if (action == IRDMA_ARP_ADD) { cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY; info = &cqp_info->in.u.add_arp_cache_entry.info; memset(info, 0, sizeof(*info)); info->arp_index = (u16)arp_index; info->permanent = true; ether_addr_copy(info->mac_addr, mac_addr); cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request; cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp; } else { cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY; cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request; cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp; cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index; } cqp_info->post_sq = 1; irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); } /** * irdma_send_syn_cqp_callback - do syn/ack after qhash * @cqp_request: qhash cqp completion */ static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request) { struct irdma_cm_node *cm_node = cqp_request->param; irdma_send_syn(cm_node, 1); irdma_rem_ref_cm_node(cm_node); } /** * irdma_manage_qhash - add or modify qhash * @iwdev: irdma device * @cminfo: cm info for qhash * @etype: 
type (syn or quad) * @mtype: type of qhash * @cmnode: cmnode associated with connection * @wait: wait for completion */ int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo, enum irdma_quad_entry_type etype, enum irdma_quad_hash_manage_type mtype, void *cmnode, bool wait) { struct irdma_qhash_table_info *info; struct irdma_cqp *iwcqp = &iwdev->rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_cm_node *cm_node = cmnode; int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; info = &cqp_info->in.u.manage_qhash_table_entry.info; memset(info, 0, sizeof(*info)); info->vsi = &iwdev->vsi; info->manage = mtype; info->entry_type = etype; if (cminfo->vlan_id < VLAN_N_VID) { info->vlan_valid = true; info->vlan_id = cminfo->vlan_id; } else { info->vlan_valid = false; } info->ipv4_valid = cminfo->ipv4; info->user_pri = cminfo->user_pri; ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr); info->qp_num = cminfo->qh_qpid; info->dest_port = cminfo->loc_port; info->dest_ip[0] = cminfo->loc_addr[0]; info->dest_ip[1] = cminfo->loc_addr[1]; info->dest_ip[2] = cminfo->loc_addr[2]; info->dest_ip[3] = cminfo->loc_addr[3]; if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED || etype == IRDMA_QHASH_TYPE_UDP_UNICAST || etype == IRDMA_QHASH_TYPE_UDP_MCAST || etype == IRDMA_QHASH_TYPE_ROCE_MCAST || etype == IRDMA_QHASH_TYPE_ROCEV2_HW) { info->src_port = cminfo->rem_port; info->src_ip[0] = cminfo->rem_addr[0]; info->src_ip[1] = cminfo->rem_addr[1]; info->src_ip[2] = cminfo->rem_addr[2]; info->src_ip[3] = cminfo->rem_addr[3]; } if (cmnode) { cqp_request->callback_fcn = irdma_send_syn_cqp_callback; cqp_request->param = cmnode; if (!wait) refcount_inc(&cm_node->refcnt); } if (info->ipv4_valid) ibdev_dbg(&iwdev->ibdev, "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n", (!mtype) ? "DELETE" : "ADD", __builtin_return_address(0), info->dest_port, info->src_port, info->dest_ip, info->src_ip, info->mac_addr, cminfo->vlan_id, cmnode ? cmnode : NULL); else ibdev_dbg(&iwdev->ibdev, "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n", (!mtype) ? "DELETE" : "ADD", __builtin_return_address(0), info->dest_port, info->src_port, info->dest_ip, info->src_ip, info->mac_addr, cminfo->vlan_id, cmnode ? 
cmnode : NULL); cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp; cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request; cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY; cqp_info->post_sq = 1; status = irdma_handle_cqp_op(iwdev->rf, cqp_request); if (status && cm_node && !wait) irdma_rem_ref_cm_node(cm_node); irdma_put_cqp_request(iwcqp, cqp_request); return status; } /** * irdma_hw_flush_wqes_callback - Check return code after flush * @cqp_request: qhash cqp completion */ static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request) { struct irdma_qp_flush_info *hw_info; struct irdma_sc_qp *qp; struct irdma_qp *iwqp; struct cqp_cmds_info *cqp_info; cqp_info = &cqp_request->info; hw_info = &cqp_info->in.u.qp_flush_wqes.info; qp = cqp_info->in.u.qp_flush_wqes.qp; iwqp = qp->qp_uk.back_qp; if (cqp_request->compl_info.maj_err_code) return; if (hw_info->rq && (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED || cqp_request->compl_info.min_err_code == 0)) { /* RQ WQE flush was requested but did not happen */ qp->qp_uk.rq_flush_complete = true; } if (hw_info->sq && (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED || cqp_request->compl_info.min_err_code == 0)) { if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) { ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work", qp->qp_uk.qp_id); irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC); } qp->qp_uk.sq_flush_complete = true; } } /** * irdma_hw_flush_wqes - flush qp's wqe * @rf: RDMA PCI function * @qp: hardware control qp * @info: info for flush * @wait: flag wait for completion */ int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, struct irdma_qp_flush_info *info, bool wait) { int status; struct irdma_qp_flush_info *hw_info; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_qp *iwqp = qp->qp_uk.back_qp; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; if (!wait) cqp_request->callback_fcn = irdma_hw_flush_wqes_callback; hw_info = &cqp_request->info.in.u.qp_flush_wqes.info; memcpy(hw_info, info, sizeof(*hw_info)); cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES; cqp_info->post_sq = 1; cqp_info->in.u.qp_flush_wqes.qp = qp; cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); if (status) { qp->qp_uk.sq_flush_complete = true; qp->qp_uk.rq_flush_complete = true; irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } if (!wait || cqp_request->compl_info.maj_err_code) goto put_cqp; if (info->rq) { if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED || cqp_request->compl_info.min_err_code == 0) { /* RQ WQE flush was requested but did not happen */ qp->qp_uk.rq_flush_complete = true; } } if (info->sq) { if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED || cqp_request->compl_info.min_err_code == 0) { /* * Handling case where WQE is posted to empty SQ when * flush has not completed */ if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) { struct irdma_cqp_request *new_req; if (!qp->qp_uk.sq_flush_complete) goto put_cqp; qp->qp_uk.sq_flush_complete = false; qp->flush_sq = false; info->rq = false; info->sq = true; new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!new_req) { status = -ENOMEM; goto put_cqp; } cqp_info = &new_req->info; hw_info = &new_req->info.in.u.qp_flush_wqes.info; memcpy(hw_info, 
info, sizeof(*hw_info)); cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES; cqp_info->post_sq = 1; cqp_info->in.u.qp_flush_wqes.qp = qp; cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req; status = irdma_handle_cqp_op(rf, new_req); if (new_req->compl_info.maj_err_code || new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED || status) { ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d", iwqp->ibqp.qp_num); qp->qp_uk.sq_flush_complete = false; irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC); } irdma_put_cqp_request(&rf->cqp, new_req); } else { /* SQ WQE flush was requested but did not happen */ qp->qp_uk.sq_flush_complete = true; } } else { if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) qp->qp_uk.sq_flush_complete = true; } } ibdev_dbg(&rf->iwdev->ibdev, "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n", iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state, iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state, cqp_request->compl_info.maj_err_code, cqp_request->compl_info.min_err_code); put_cqp: irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_gen_ae - generate AE * @rf: RDMA PCI function * @qp: qp associated with AE * @info: info for ae * @wait: wait for completion */ void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, struct irdma_gen_ae_info *info, bool wait) { struct irdma_gen_ae_info *ae_info; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); if (!cqp_request) return; cqp_info = &cqp_request->info; ae_info = &cqp_request->info.in.u.gen_ae.info; memcpy(ae_info, info, sizeof(*ae_info)); cqp_info->cqp_cmd = IRDMA_OP_GEN_AE; cqp_info->post_sq = 1; cqp_info->in.u.gen_ae.qp = qp; cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request; irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); } void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask) { struct irdma_qp_flush_info info = {}; struct irdma_pci_f *rf = iwqp->iwdev->rf; u8 flush_code = iwqp->sc_qp.flush_code; if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ)) return; /* Set flush info fields*/ info.sq = flush_mask & IRDMA_FLUSH_SQ; info.rq = flush_mask & IRDMA_FLUSH_RQ; /* Generate userflush errors in CQE */ info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR; info.sq_minor_code = FLUSH_GENERAL_ERR; info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR; info.rq_minor_code = FLUSH_GENERAL_ERR; info.userflushcode = true; if (flush_mask & IRDMA_REFLUSH) { if (info.sq) iwqp->sc_qp.flush_sq = false; if (info.rq) iwqp->sc_qp.flush_rq = false; } else { if (flush_code) { if (info.sq && iwqp->sc_qp.sq_flush_code) info.sq_minor_code = flush_code; if (info.rq && iwqp->sc_qp.rq_flush_code) info.rq_minor_code = flush_code; } if (!iwqp->user_mode) queue_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); } /* Issue flush */ (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info, flush_mask & IRDMA_FLUSH_WAIT); iwqp->flush_issued = true; }
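/*
 * Editor's note -- annotation only, not part of the dumped source file.
 * In the flush paths above, the CQP completion's min_err_code identifies
 * which queue actually had WQEs flushed.  If an SQ flush was requested but
 * the completion reports IRDMA_CQP_COMPL_RQ_WQE_FLUSHED (or 0), the SQ had
 * nothing outstanding, so sq_flush_complete is set right away; the RQ case
 * is symmetric ("flush was requested but did not happen").  In the
 * asynchronous callback an SQ that still reports IRDMA_RING_MORE_WORK() is
 * treated as fatal (IRDMA_QP_EVENT_CATASTROPHIC), while the synchronous
 * path in irdma_hw_flush_wqes() retries the SQ flush once with a blocking
 * CQP request before declaring the same fatal event.
 */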
linux-master
drivers/infiniband/hw/irdma/hw.c
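Editor's note: the irdma_flush_wqes() helper at the end of the hw.c excerpt above folds the IRDMA_FLUSH_SQ / IRDMA_FLUSH_RQ / IRDMA_FLUSH_WAIT / IRDMA_REFLUSH bits into one mask before filling an irdma_qp_flush_info. The standalone sketch below mirrors only that mask-decoding step so it can be compiled and run in user space; the DEMO_* names and bit values are placeholders chosen for illustration, not the driver's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the IRDMA_FLUSH_* bits; the values are assumed
 * for this demo only and do not come from the irdma headers.
 */
#define DEMO_FLUSH_SQ	(1u << 0)
#define DEMO_FLUSH_RQ	(1u << 1)
#define DEMO_FLUSH_WAIT	(1u << 2)
#define DEMO_REFLUSH	(1u << 3)

/* Minimal analogue of the fields irdma_flush_wqes() derives from the mask. */
struct demo_flush_info {
	bool sq;
	bool rq;
	bool wait;
	bool reflush;
};

/* Mirrors the early-out and bit tests at the top of irdma_flush_wqes():
 * nothing is flushed unless at least one of the SQ/RQ bits is set.
 */
static bool demo_decode_flush_mask(unsigned int mask,
				   struct demo_flush_info *info)
{
	if (!(mask & DEMO_FLUSH_SQ) && !(mask & DEMO_FLUSH_RQ))
		return false;

	info->sq = mask & DEMO_FLUSH_SQ;
	info->rq = mask & DEMO_FLUSH_RQ;
	info->wait = mask & DEMO_FLUSH_WAIT;
	info->reflush = mask & DEMO_REFLUSH;
	return true;
}

int main(void)
{
	struct demo_flush_info info = { false, false, false, false };
	unsigned int mask = DEMO_FLUSH_SQ | DEMO_FLUSH_RQ | DEMO_FLUSH_WAIT;

	/* Prints: sq=1 rq=1 wait=1 reflush=0 */
	if (demo_decode_flush_mask(mask, &info))
		printf("sq=%d rq=%d wait=%d reflush=%d\n",
		       info.sq, info.rq, info.wait, info.reflush);
	return 0;
}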
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include <linux/etherdevice.h> #include "osdep.h" #include "hmc.h" #include "defs.h" #include "type.h" #include "ws.h" #include "protos.h" /** * irdma_get_qp_from_list - get next qp from a list * @head: Listhead of qp's * @qp: current qp */ struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head, struct irdma_sc_qp *qp) { struct list_head *lastentry; struct list_head *entry = NULL; if (list_empty(head)) return NULL; if (!qp) { entry = head->next; } else { lastentry = &qp->list; entry = lastentry->next; if (entry == head) return NULL; } return container_of(entry, struct irdma_sc_qp, list); } /** * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI * @vsi: the VSI struct pointer * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND */ void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op) { struct irdma_sc_qp *qp = NULL; u8 i; for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { mutex_lock(&vsi->qos[i].qos_mutex); qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp); while (qp) { if (op == IRDMA_OP_RESUME) { if (!qp->dev->ws_add(vsi, i)) { qp->qs_handle = vsi->qos[qp->user_pri].qs_handle; irdma_cqp_qp_suspend_resume(qp, op); } else { irdma_cqp_qp_suspend_resume(qp, op); irdma_modify_qp_to_err(qp); } } else if (op == IRDMA_OP_SUSPEND) { /* issue cqp suspend command */ if (!irdma_cqp_qp_suspend_resume(qp, op)) atomic_inc(&vsi->qp_suspend_reqs); } qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp); } mutex_unlock(&vsi->qos[i].qos_mutex); } } static void irdma_set_qos_info(struct irdma_sc_vsi *vsi, struct irdma_l2params *l2p) { u8 i; vsi->qos_rel_bw = l2p->vsi_rel_bw; vsi->qos_prio_type = l2p->vsi_prio_type; vsi->dscp_mode = l2p->dscp_mode; if (l2p->dscp_mode) { memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map)); for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) l2p->up2tc[i] = i; } for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) vsi->qos[i].qs_handle = l2p->qs_handle_list[i]; vsi->qos[i].traffic_class = l2p->up2tc[i]; vsi->qos[i].rel_bw = l2p->tc_info[vsi->qos[i].traffic_class].rel_bw; vsi->qos[i].prio_type = l2p->tc_info[vsi->qos[i].traffic_class].prio_type; vsi->qos[i].valid = false; } } /** * irdma_change_l2params - given the new l2 parameters, change all qp * @vsi: RDMA VSI pointer * @l2params: New parameters from l2 */ void irdma_change_l2params(struct irdma_sc_vsi *vsi, struct irdma_l2params *l2params) { if (l2params->mtu_changed) { vsi->mtu = l2params->mtu; if (vsi->ieq) irdma_reinitialize_ieq(vsi); } if (!l2params->tc_changed) return; vsi->tc_change_pending = false; irdma_set_qos_info(vsi, l2params); irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME); } /** * irdma_qp_rem_qos - remove qp from qos lists during destroy qp * @qp: qp to be removed from qos */ void irdma_qp_rem_qos(struct irdma_sc_qp *qp) { struct irdma_sc_vsi *vsi = qp->vsi; ibdev_dbg(to_ibdev(qp->dev), "DCB: DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n", qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist); mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); if (qp->on_qoslist) { qp->on_qoslist = false; list_del(&qp->list); } mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); } /** * irdma_qp_add_qos - called during setctx for qp to be added to qos * @qp: qp to be added to qos */ void irdma_qp_add_qos(struct irdma_sc_qp *qp) { struct irdma_sc_vsi *vsi = qp->vsi; ibdev_dbg(to_ibdev(qp->dev), "DCB: DCB: Add qp[%d] UP[%d] qset[%d] 
on_qoslist[%d]\n", qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist); mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); if (!qp->on_qoslist) { list_add(&qp->list, &vsi->qos[qp->user_pri].qplist); qp->on_qoslist = true; qp->qs_handle = vsi->qos[qp->user_pri].qs_handle; } mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); } /** * irdma_sc_pd_init - initialize sc pd struct * @dev: sc device struct * @pd: sc pd ptr * @pd_id: pd_id for allocated pd * @abi_ver: User/Kernel ABI version */ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id, int abi_ver) { pd->pd_id = pd_id; pd->abi_ver = abi_ver; pd->dev = dev; } /** * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry * @cqp: struct for cqp hw * @info: arp entry information * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp, struct irdma_add_arp_cache_entry_info *info, u64 scratch, bool post_sq) { __le64 *wqe; u64 hdr; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 8, info->reach_max); set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr)); hdr = info->arp_index | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) | FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) | FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_del_arp_cache_entry - dele arp cache entry * @cqp: struct for cqp hw * @scratch: u64 saved to be used during cqp completion * @arp_index: arp index to delete arp entry * @post_sq: flag for cqp db to ring */ static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch, u16 arp_index, bool post_sq) { __le64 *wqe; u64 hdr; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; hdr = arp_index | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries * @cqp: struct for cqp hw * @info: info for apbvt entry to add or delete * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp, struct irdma_apbvt_info *info, u64 scratch, bool post_sq) { __le64 *wqe; u64 hdr; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, info->port); hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) | FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_manage_qhash_table_entry - manage quad hash entries * @cqp: struct for cqp hw * @info: 
info for quad hash to manage * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring * * This is called before connection establishment is started. * For passive connections, when listener is created, it will * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local * ip address and tcp port. When SYN is received (passive * connections) or sent (active connections), this routine is * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED * and quad is passed in info. * * When iwarp connection is done and its state moves to RTS, the * quad hash entry in the hardware will point to iwarp's qp * number and requires no calls from the driver. */ static int irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp, struct irdma_qhash_table_info *info, u64 scratch, bool post_sq) { __le64 *wqe; u64 qw1 = 0; u64 qw2 = 0; u64 temp; struct irdma_sc_vsi *vsi = info->vsi; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr)); qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) | FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port); if (info->ipv4_valid) { set_64bit_val(wqe, 48, FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0])); } else { set_64bit_val(wqe, 56, FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) | FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1])); set_64bit_val(wqe, 48, FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) | FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3])); } qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE, vsi->qos[info->user_pri].qs_handle); if (info->vlan_valid) qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id); set_64bit_val(wqe, 16, qw2); if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) { qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port); if (!info->ipv4_valid) { set_64bit_val(wqe, 40, FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) | FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1])); set_64bit_val(wqe, 32, FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) | FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3])); } else { set_64bit_val(wqe, 32, FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0])); } } set_64bit_val(wqe, 8, qw1); temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE, IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) | FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) | FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) | FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) | FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, temp); print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_qp_init - initialize qp * @qp: sc qp * @info: initialization qp info */ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info) { int ret_code; u32 pble_obj_cnt; u16 wqe_size; if (info->qp_uk_init_info.max_sq_frag_cnt > info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags || info->qp_uk_init_info.max_rq_frag_cnt > info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags) return -EINVAL; qp->dev = info->pd->dev; qp->vsi = info->vsi; qp->ieq_qp = info->vsi->exception_lan_q; qp->sq_pa = info->sq_pa; qp->rq_pa = info->rq_pa; qp->hw_host_ctx_pa = info->host_ctx_pa; qp->q2_pa = info->q2_pa; qp->shadow_area_pa = 
info->shadow_area_pa; qp->q2_buf = info->q2; qp->pd = info->pd; qp->hw_host_ctx = info->host_ctx; info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db; ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info); if (ret_code) return ret_code; qp->virtual_map = info->virtual_map; pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) || (info->virtual_map && info->rq_pa >= pble_obj_cnt)) return -EINVAL; qp->llp_stream_handle = (void *)(-1); qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size, IRDMA_QUEUE_TYPE_SQ_RQ); ibdev_dbg(to_ibdev(qp->dev), "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n", qp->hw_sq_size, qp->qp_uk.sq_ring.size); if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4) wqe_size = IRDMA_WQE_SIZE_128; else ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt, &wqe_size); if (ret_code) return ret_code; qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size * (wqe_size / IRDMA_QP_WQE_MIN_SIZE), IRDMA_QUEUE_TYPE_SQ_RQ); ibdev_dbg(to_ibdev(qp->dev), "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n", qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size); qp->sq_tph_val = info->sq_tph_val; qp->rq_tph_val = info->rq_tph_val; qp->sq_tph_en = info->sq_tph_en; qp->rq_tph_en = info->rq_tph_en; qp->rcv_tph_en = info->rcv_tph_en; qp->xmit_tph_en = info->xmit_tph_en; qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq; qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle; return 0; } /** * irdma_sc_qp_create - create qp * @qp: sc qp * @info: qp create info * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info, u64 scratch, bool post_sq) { struct irdma_sc_cqp *cqp; __le64 *wqe; u64 hdr; cqp = qp->dev->cqp; if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id || qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt) return -EINVAL; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); set_64bit_val(wqe, 40, qp->shadow_area_pa); hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) | FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 
1 : 0)) | FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) | FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) | FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) | FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID, info->arp_cache_idx_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_qp_modify - modify qp cqp wqe * @qp: sc qp * @info: modify qp info * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info, u64 scratch, bool post_sq) { __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; u8 term_actions = 0; u8 term_len = 0; cqp = qp->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) { if (info->dont_send_fin) term_actions += IRDMAQP_TERM_SEND_TERM_ONLY; if (info->dont_send_term) term_actions += IRDMAQP_TERM_SEND_FIN_ONLY; if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN || term_actions == IRDMAQP_TERM_SEND_TERM_ONLY) term_len = info->termlen; } set_64bit_val(wqe, 8, FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) | FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len)); set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); set_64bit_val(wqe, 40, qp->shadow_area_pa); hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) | FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID, info->cached_var_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) | FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) | FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) | FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) | FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, info->remove_hash_idx) | FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) | FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) | FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID, info->arp_cache_idx_valid) | FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_qp_destroy - cqp destroy qp * @qp: sc qp * @scratch: u64 saved to be used during cqp completion * @remove_hash_idx: flag if to remove hash idx * @ignore_mw_bnd: memory window bind flag * @post_sq: flag for cqp db to ring */ int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch, bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq) { __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; cqp = qp->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, 
qp->hw_host_ctx_pa); set_64bit_val(wqe, 40, qp->shadow_area_pa); hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) | FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) | FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) | FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_get_encoded_ird_size - * @ird_size: IRD size * The ird from the connection is rounded to a supported HW setting and then encoded * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input */ static u8 irdma_sc_get_encoded_ird_size(u16 ird_size) { switch (ird_size ? roundup_pow_of_two(2 * ird_size) : 4) { case 256: return IRDMA_IRD_HW_SIZE_256; case 128: return IRDMA_IRD_HW_SIZE_128; case 64: case 32: return IRDMA_IRD_HW_SIZE_64; case 16: case 8: return IRDMA_IRD_HW_SIZE_16; case 4: default: break; } return IRDMA_IRD_HW_SIZE_4; } /** * irdma_sc_qp_setctx_roce - set qp's context * @qp: sc qp * @qp_ctx: context ptr * @info: ctx info */ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx, struct irdma_qp_host_ctx_info *info) { struct irdma_roce_offload_info *roce_info; struct irdma_udp_offload_info *udp; u8 push_mode_en; u32 push_idx; roce_info = info->roce_info; udp = info->udp_info; qp->user_pri = info->user_pri; if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) { push_mode_en = 0; push_idx = 0; } else { push_mode_en = 1; push_idx = qp->push_idx; } set_64bit_val(qp_ctx, 0, FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) | FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) | FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) | FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) | FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) | FIELD_PREP(IRDMAQPC_PPIDX, push_idx) | FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) | FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) | FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) | FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) | FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) | FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) | FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) | FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag)); set_64bit_val(qp_ctx, 8, qp->sq_pa); set_64bit_val(qp_ctx, 16, qp->rq_pa); if ((roce_info->dcqcn_en || roce_info->dctcp_en) && !(udp->tos & 0x03)) udp->tos |= ECN_CODE_PT_VAL; set_64bit_val(qp_ctx, 24, FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) | FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) | FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) | FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) | FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port)); set_64bit_val(qp_ctx, 32, FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) | FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3])); set_64bit_val(qp_ctx, 40, FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) | FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1])); set_64bit_val(qp_ctx, 48, FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) | FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) | FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx)); set_64bit_val(qp_ctx, 56, FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) | 
FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) | FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) | FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label)); set_64bit_val(qp_ctx, 64, FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) | FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp)); set_64bit_val(qp_ctx, 80, FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) | FIELD_PREP(IRDMAQPC_LSN, udp->lsn)); set_64bit_val(qp_ctx, 88, FIELD_PREP(IRDMAQPC_EPSN, udp->epsn)); set_64bit_val(qp_ctx, 96, FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) | FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una)); set_64bit_val(qp_ctx, 112, FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd)); set_64bit_val(qp_ctx, 128, FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) | FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) | FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) | FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin)); set_64bit_val(qp_ctx, 136, FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) | FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num)); set_64bit_val(qp_ctx, 144, FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx)); set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16); set_64bit_val(qp_ctx, 160, FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) | FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) | FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) | FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) | FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) | FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) | FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) | FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) | FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) | FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) | FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) | FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) | FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en)); set_64bit_val(qp_ctx, 168, FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx)); set_64bit_val(qp_ctx, 176, FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) | FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) | FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle)); set_64bit_val(qp_ctx, 184, FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) | FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2])); set_64bit_val(qp_ctx, 192, FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) | FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0])); set_64bit_val(qp_ctx, 200, FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) | FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low)); set_64bit_val(qp_ctx, 208, FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx)); print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, qp_ctx, IRDMA_QP_CTX_SIZE, false); } /* irdma_sc_alloc_local_mac_entry - allocate a mac entry * @cqp: struct for cqp hw * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch, bool post_sq) { __le64 *wqe; u64 hdr; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) 
irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_add_local_mac_entry - add mac enry * @cqp: struct for cqp hw * @info:mac addr info * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp, struct irdma_local_mac_entry_info *info, u64 scratch, bool post_sq) { __le64 *wqe; u64 header; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr)); header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, header); print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_del_local_mac_entry - cqp wqe to dele local mac * @cqp: struct for cqp hw * @scratch: u64 saved to be used during cqp completion * @entry_idx: index of mac entry * @ignore_ref_count: to force mac adde delete * @post_sq: flag for cqp db to ring */ static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch, u16 entry_idx, u8 ignore_ref_count, bool post_sq) { __le64 *wqe; u64 header; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) | FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, header); print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_qp_setctx - set qp's context * @qp: sc qp * @qp_ctx: context ptr * @info: ctx info */ void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx, struct irdma_qp_host_ctx_info *info) { struct irdma_iwarp_offload_info *iw; struct irdma_tcp_offload_info *tcp; struct irdma_sc_dev *dev; u8 push_mode_en; u32 push_idx; u64 qw0, qw3, qw7 = 0, qw16 = 0; u64 mac = 0; iw = info->iwarp_info; tcp = info->tcp_info; dev = qp->dev; if (iw->rcv_mark_en) { qp->pfpdu.marker_len = 4; qp->pfpdu.rcv_start_seq = tcp->rcv_nxt; } qp->user_pri = info->user_pri; if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) { push_mode_en = 0; push_idx = 0; } else { push_mode_en = 1; push_idx = qp->push_idx; } qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) | FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) | FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) | FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) | FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) | FIELD_PREP(IRDMAQPC_PPIDX, push_idx) | FIELD_PREP(IRDMAQPC_PMENA, push_mode_en); set_64bit_val(qp_ctx, 8, qp->sq_pa); set_64bit_val(qp_ctx, 16, qp->rq_pa); qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) | FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size); if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, qp->src_mac_addr_idx); set_64bit_val(qp_ctx, 136, FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) | FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num)); set_64bit_val(qp_ctx, 168, FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx)); 
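/* Editor's note (descriptive comment, not in the original file): the
 * iwarp_info_valid and tcp_info_valid blocks below OR further bits into the
 * local qw0/qw3/qw7/qw16 words; those accumulated words are only committed
 * to the context (offsets 0, 24, 56 and 128) by the set_64bit_val() calls
 * at the end of irdma_sc_qp_setctx().
 */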
set_64bit_val(qp_ctx, 176, FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) | FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) | FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) | FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp)); if (info->iwarp_info_valid) { qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) | FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) | FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) | FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) | FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) | FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) | FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, iw->err_rq_idx_valid); qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id); qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) | FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin); set_64bit_val(qp_ctx, 144, FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) | FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx)); if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) mac = ether_addr_to_u64(iw->mac_addr); set_64bit_val(qp_ctx, 152, mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent)); set_64bit_val(qp_ctx, 160, FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) | FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) | FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) | FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) | FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) | FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) | FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) | FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) | FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) | FIELD_PREP(IRDMAQPC_IWARPMODE, 1) | FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) | FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) | FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) | FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) | FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? 
iw->snd_mark_offset : tcp->snd_nxt) | FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en)); } if (info->tcp_info_valid) { qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) | FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) | FIELD_PREP(IRDMAQPC_INSERTVLANTAG, tcp->insert_vlan_tag) | FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) | FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) | FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) | FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh); if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03)) tcp->tos |= ECN_CODE_PT_VAL; qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) | FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) | FIELD_PREP(IRDMAQPC_TOS, tcp->tos) | FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) | FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port); if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) { qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx); qp->src_mac_addr_idx = tcp->src_mac_addr_idx; } set_64bit_val(qp_ctx, 32, FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) | FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3])); set_64bit_val(qp_ctx, 40, FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) | FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1])); set_64bit_val(qp_ctx, 48, FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) | FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) | FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) | FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx)); qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) | FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) | FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT, tcp->ignore_tcp_opt) | FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT, tcp->ignore_tcp_uns_opt) | FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) | FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) | FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale); set_64bit_val(qp_ctx, 72, FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) | FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age)); set_64bit_val(qp_ctx, 80, FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) | FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd)); set_64bit_val(qp_ctx, 88, FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) | FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd)); set_64bit_val(qp_ctx, 96, FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) | FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una)); set_64bit_val(qp_ctx, 104, FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) | FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var)); set_64bit_val(qp_ctx, 112, FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) | FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd)); set_64bit_val(qp_ctx, 120, FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) | FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2)); qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) | FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh); set_64bit_val(qp_ctx, 184, FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) | FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2])); set_64bit_val(qp_ctx, 192, FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) | FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0])); set_64bit_val(qp_ctx, 200, FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) | FIELD_PREP(IRDMAQPC_TLOW, iw->t_low)); set_64bit_val(qp_ctx, 208, FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx)); } set_64bit_val(qp_ctx, 0, qw0); set_64bit_val(qp_ctx, 24, qw3); set_64bit_val(qp_ctx, 56, qw7); set_64bit_val(qp_ctx, 128, qw16); print_hex_dump_debug("WQE: QP_HOST CTX", DUMP_PREFIX_OFFSET, 16, 8, qp_ctx, IRDMA_QP_CTX_SIZE, false); } /** * irdma_sc_alloc_stag - mr 
stag alloc * @dev: sc device struct * @info: stag info * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev, struct irdma_allocate_stag_info *info, u64 scratch, bool post_sq) { __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; enum irdma_page_size page_size; if (!info->total_len && !info->all_memory) return -EINVAL; if (info->page_size == 0x40000000) page_size = IRDMA_PAGE_SIZE_1G; else if (info->page_size == 0x200000) page_size = IRDMA_PAGE_SIZE_2M; else page_size = IRDMA_PAGE_SIZE_4K; cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 8, FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) | FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len)); set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx)); set_64bit_val(wqe, 40, FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index)); if (info->chunk_size) set_64bit_val(wqe, 48, FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx)); hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) | FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) | FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) | FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) | FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) | FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) | FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) | FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_mr_reg_non_shared - non-shared mr registration * @dev: sc device struct * @info: mr info * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev, struct irdma_reg_ns_stag_info *info, u64 scratch, bool post_sq) { __le64 *wqe; u64 fbo; struct irdma_sc_cqp *cqp; u64 hdr; u32 pble_obj_cnt; bool remote_access; u8 addr_type; enum irdma_page_size page_size; if (!info->total_len && !info->all_memory) return -EINVAL; if (info->page_size == 0x40000000) page_size = IRDMA_PAGE_SIZE_1G; else if (info->page_size == 0x200000) page_size = IRDMA_PAGE_SIZE_2M; else if (info->page_size == 0x1000) page_size = IRDMA_PAGE_SIZE_4K; else return -EINVAL; if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY | IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY)) remote_access = true; else remote_access = false; pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt) return -EINVAL; cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; fbo = info->va & (info->page_size - 1); set_64bit_val(wqe, 0, (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ? 
info->va : fbo)); set_64bit_val(wqe, 8, FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) | FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID)); set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) | FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx)); if (!info->chunk_size) { set_64bit_val(wqe, 32, info->reg_addr_pa); set_64bit_val(wqe, 48, 0); } else { set_64bit_val(wqe, 32, 0); set_64bit_val(wqe, 48, FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index)); } set_64bit_val(wqe, 40, info->hmc_fcn_index); set_64bit_val(wqe, 56, 0); addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0; hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) | FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) | FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) | FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) | FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) | FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) | FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) | FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) | FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_dealloc_stag - deallocate stag * @dev: sc device struct * @info: dealloc stag info * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev, struct irdma_dealloc_stag_info *info, u64 scratch, bool post_sq) { u64 hdr; __le64 *wqe; struct irdma_sc_cqp *cqp; cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 8, FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID)); set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx)); hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) | FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_mw_alloc - mw allocate * @dev: sc device struct * @info: memory window allocation information * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info, u64 scratch, bool post_sq) { u64 hdr; struct irdma_sc_cqp *cqp; __le64 *wqe; cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 8, FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID)); set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index)); hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) | FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) | FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY, info->mw1_bind_dont_vldt_key) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) 
irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp * @qp: sc qp struct * @info: fast mr info * @post_sq: flag for cqp db to ring */ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp, struct irdma_fast_reg_stag_info *info, bool post_sq) { u64 temp, hdr; __le64 *wqe; u32 wqe_idx; enum irdma_page_size page_size; struct irdma_post_sq_info sq_info = {}; if (info->page_size == 0x40000000) page_size = IRDMA_PAGE_SIZE_1G; else if (info->page_size == 0x200000) page_size = IRDMA_PAGE_SIZE_2M; else page_size = IRDMA_PAGE_SIZE_4K; sq_info.wr_id = info->wr_id; sq_info.signaled = info->signaled; wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info); if (!wqe) return -ENOMEM; irdma_clr_wqes(&qp->qp_uk, wqe_idx); ibdev_dbg(to_ibdev(qp->dev), "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n", info->wr_id, wqe_idx, &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid); temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo; set_64bit_val(wqe, 0, temp); temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI, info->first_pm_pbl_index >> 16); set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) | FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa)); set_64bit_val(wqe, 16, info->total_len | FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index)); hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) | FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) | FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) | FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) | FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) | FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) | FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) | FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false); if (post_sq) irdma_uk_qp_post_wr(&qp->qp_uk); return 0; } /** * irdma_sc_gen_rts_ae - request AE generated after RTS * @qp: sc qp struct */ static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp) { __le64 *wqe; u64 hdr; struct irdma_qp_uk *qp_uk; qp_uk = &qp->qp_uk; wqe = qp_uk->sq_base[1].elem; hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false); wqe = qp_uk->sq_base[2].elem; hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false); } /** * irdma_sc_send_lsmm - send last streaming mode message * @qp: sc qp struct * @lsmm_buf: buffer with lsmm message * @size: size of lsmm buffer * @stag: stag of lsmm buffer */ void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size, irdma_stag stag) { __le64 *wqe; u64 hdr; struct irdma_qp_uk *qp_uk; qp_uk = &qp->qp_uk; wqe = qp_uk->sq_base->elem; set_64bit_val(wqe, 
0, (uintptr_t)lsmm_buf); if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) | FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag)); } else { set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) | FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity)); } set_64bit_val(wqe, 16, 0); hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) | FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) | FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false); if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) irdma_sc_gen_rts_ae(qp); } /** * irdma_sc_send_rtt - send last read0 or write0 * @qp: sc qp struct * @read: Do read0 or write0 */ void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read) { __le64 *wqe; u64 hdr; struct irdma_qp_uk *qp_uk; qp_uk = &qp->qp_uk; wqe = qp_uk->sq_base->elem; set_64bit_val(wqe, 0, 0); set_64bit_val(wqe, 16, 0); if (read) { if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd)); } else { set_64bit_val(wqe, 8, (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity)); } hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) | FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); } else { if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { set_64bit_val(wqe, 8, 0); } else { set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity)); } hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); } dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false); if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) irdma_sc_gen_rts_ae(qp); } /** * irdma_iwarp_opcode - determine if incoming is rdma layer * @info: aeq info for the packet * @pkt: packet for error */ static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt) { __be16 *mpa; u32 opcode = 0xffffffff; if (info->q2_data_written) { mpa = (__be16 *)pkt; opcode = ntohs(mpa[1]) & 0xf; } return opcode; } /** * irdma_locate_mpa - return pointer to mpa in the pkt * @pkt: packet with data */ static u8 *irdma_locate_mpa(u8 *pkt) { /* skip over ethernet header */ pkt += IRDMA_MAC_HLEN; /* Skip over IP and TCP headers */ pkt += 4 * (pkt[0] & 0x0f); pkt += 4 * ((pkt[12] >> 4) & 0x0f); return pkt; } /** * irdma_bld_termhdr_ctrl - setup terminate hdr control fields * @qp: sc qp ptr for pkt * @hdr: term hdr * @opcode: flush opcode for termhdr * @layer_etype: error layer + error type * @err: error cod ein the header */ static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp, struct irdma_terminate_hdr *hdr, enum irdma_flush_opcode opcode, u8 layer_etype, u8 err) { qp->flush_code = opcode; hdr->layer_etype = layer_etype; hdr->error_code = err; } /** * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr * @pkt: ptr to mpa in offending pkt * @hdr: term hdr * @copy_len: offending pkt length to be copied to term hdr * @is_tagged: DDP tagged or untagged */ static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr, int *copy_len, u8 
*is_tagged) { u16 ddp_seg_len; ddp_seg_len = ntohs(*(__be16 *)pkt); if (ddp_seg_len) { *copy_len = 2; hdr->hdrct = DDP_LEN_FLAG; if (pkt[2] & 0x80) { *is_tagged = 1; if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) { *copy_len += TERM_DDP_LEN_TAGGED; hdr->hdrct |= DDP_HDR_FLAG; } } else { if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) { *copy_len += TERM_DDP_LEN_UNTAGGED; hdr->hdrct |= DDP_HDR_FLAG; } if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) && ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) { *copy_len += TERM_RDMA_LEN; hdr->hdrct |= RDMA_HDR_FLAG; } } } } /** * irdma_bld_terminate_hdr - build terminate message header * @qp: qp associated with received terminate AE * @info: the struct contiaing AE information */ static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp, struct irdma_aeqe_info *info) { u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; int copy_len = 0; u8 is_tagged = 0; u32 opcode; struct irdma_terminate_hdr *termhdr; termhdr = (struct irdma_terminate_hdr *)qp->q2_buf; memset(termhdr, 0, Q2_BAD_FRAME_OFFSET); if (info->q2_data_written) { pkt = irdma_locate_mpa(pkt); irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged); } opcode = irdma_iwarp_opcode(info, pkt); qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; qp->sq_flush_code = info->sq; qp->rq_flush_code = info->rq; switch (info->ae_id) { case IRDMA_AE_AMP_UNALLOCATED_STAG: qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; if (opcode == IRDMA_OP_TYPE_RDMA_WRITE) irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR, (LAYER_DDP << 4) | DDP_TAGGED_BUF, DDP_TAGGED_INV_STAG); else irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG); break; case IRDMA_AE_AMP_BOUNDS_VIOLATION: qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; if (info->q2_data_written) irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR, (LAYER_DDP << 4) | DDP_TAGGED_BUF, DDP_TAGGED_BOUNDS); else irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS); break; case IRDMA_AE_AMP_BAD_PD: switch (opcode) { case IRDMA_OP_TYPE_RDMA_WRITE: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR, (LAYER_DDP << 4) | DDP_TAGGED_BUF, DDP_TAGGED_UNASSOC_STAG); break; case IRDMA_OP_TYPE_SEND_INV: case IRDMA_OP_TYPE_SEND_SOL_INV: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG); break; default: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG); } break; case IRDMA_AE_AMP_INVALID_STAG: qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG); break; case IRDMA_AE_AMP_BAD_QP: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR, (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, DDP_UNTAGGED_INV_QN); break; case IRDMA_AE_AMP_BAD_STAG_KEY: case IRDMA_AE_AMP_BAD_STAG_INDEX: qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; switch (opcode) { case IRDMA_OP_TYPE_SEND_INV: case IRDMA_OP_TYPE_SEND_SOL_INV: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG); break; default: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG); } break; case IRDMA_AE_AMP_RIGHTS_VIOLATION: case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: case IRDMA_AE_PRIV_OPERATION_DENIED: qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; irdma_bld_termhdr_ctrl(qp, termhdr, 
FLUSH_REM_ACCESS_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS); break; case IRDMA_AE_AMP_TO_WRAP: qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP); break; case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, (LAYER_MPA << 4) | DDP_LLP, MPA_CRC); break; case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR, (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL); break; case IRDMA_AE_LCE_QP_CATASTROPHIC: case IRDMA_AE_DDP_NO_L_BIT: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR, (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL); break; case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, DDP_UNTAGGED_INV_MSN_RANGE); break; case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR, (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, DDP_UNTAGGED_INV_TOO_LONG); break; case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: if (is_tagged) irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, (LAYER_DDP << 4) | DDP_TAGGED_BUF, DDP_TAGGED_INV_DDP_VER); else irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, DDP_UNTAGGED_INV_DDP_VER); break; case IRDMA_AE_DDP_UBE_INVALID_MO: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, DDP_UNTAGGED_INV_MO); break; case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR, (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, DDP_UNTAGGED_INV_MSN_NO_BUF); break; case IRDMA_AE_DDP_UBE_INVALID_QN: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, DDP_UNTAGGED_INV_QN); break; case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER); break; default: irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR, (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED); break; } if (copy_len) memcpy(termhdr + 1, pkt, copy_len); return sizeof(struct irdma_terminate_hdr) + copy_len; } /** * irdma_terminate_send_fin() - Send fin for terminate message * @qp: qp associated with received terminate AE */ void irdma_terminate_send_fin(struct irdma_sc_qp *qp) { irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE, IRDMAQP_TERM_SEND_FIN_ONLY, 0); } /** * irdma_terminate_connection() - Bad AE and send terminate to remote QP * @qp: qp associated with received terminate AE * @info: the struct contiaing AE information */ void irdma_terminate_connection(struct irdma_sc_qp *qp, struct irdma_aeqe_info *info) { u8 termlen = 0; if (qp->term_flags & IRDMA_TERM_SENT) return; termlen = irdma_bld_terminate_hdr(qp, info); irdma_terminate_start_timer(qp); qp->term_flags |= IRDMA_TERM_SENT; irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE, IRDMAQP_TERM_SEND_TERM_ONLY, termlen); } /** * irdma_terminate_received - handle terminate received AE * @qp: qp associated with received terminate AE * @info: the struct contiaing AE information */ void irdma_terminate_received(struct irdma_sc_qp *qp, struct irdma_aeqe_info *info) { u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; __be32 *mpa; u8 ddp_ctl; u8 rdma_ctl; u16 aeq_id = 0; struct irdma_terminate_hdr *termhdr; mpa = (__be32 
*)irdma_locate_mpa(pkt); if (info->q2_data_written) { /* did not validate the frame - do it now */ ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff; rdma_ctl = ntohl(mpa[0]) & 0xff; if ((ddp_ctl & 0xc0) != 0x40) aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC; else if ((ddp_ctl & 0x03) != 1) aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION; else if (ntohl(mpa[2]) != 2) aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN; else if (ntohl(mpa[3]) != 1) aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN; else if (ntohl(mpa[4]) != 0) aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO; else if ((rdma_ctl & 0xc0) != 0x40) aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION; info->ae_id = aeq_id; if (info->ae_id) { /* Bad terminate recvd - send back a terminate */ irdma_terminate_connection(qp, info); return; } } qp->term_flags |= IRDMA_TERM_RCVD; qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; termhdr = (struct irdma_terminate_hdr *)&mpa[5]; if (termhdr->layer_etype == RDMAP_REMOTE_PROT || termhdr->layer_etype == RDMAP_REMOTE_OP) { irdma_terminate_done(qp, 0); } else { irdma_terminate_start_timer(qp); irdma_terminate_send_fin(qp); } } static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri) { return 0; } static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri) { /* do nothing */ } static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi) { /* do nothing */ } /** * irdma_sc_vsi_init - Init the vsi structure * @vsi: pointer to vsi structure to initialize * @info: the info used to initialize the vsi struct */ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi, struct irdma_vsi_init_info *info) { int i; vsi->dev = info->dev; vsi->back_vsi = info->back_vsi; vsi->register_qset = info->register_qset; vsi->unregister_qset = info->unregister_qset; vsi->mtu = info->params->mtu; vsi->exception_lan_q = info->exception_lan_q; vsi->vsi_idx = info->pf_data_vsi_num; irdma_set_qos_info(vsi, info->params); for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { mutex_init(&vsi->qos[i].qos_mutex); INIT_LIST_HEAD(&vsi->qos[i].qplist); } if (vsi->register_qset) { vsi->dev->ws_add = irdma_ws_add; vsi->dev->ws_remove = irdma_ws_remove; vsi->dev->ws_reset = irdma_ws_reset; } else { vsi->dev->ws_add = irdma_null_ws_add; vsi->dev->ws_remove = irdma_null_ws_remove; vsi->dev->ws_reset = irdma_null_ws_reset; } } /** * irdma_get_stats_idx - Return stats index * @vsi: pointer to the vsi */ static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi) { struct irdma_stats_inst_info stats_info = {}; struct irdma_sc_dev *dev = vsi->dev; u8 i; if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE, &stats_info)) return stats_info.stats_idx; } for (i = 0; i < IRDMA_MAX_STATS_COUNT_GEN_1; i++) { if (!dev->stats_idx_array[i]) { dev->stats_idx_array[i] = true; return i; } } return IRDMA_INVALID_STATS_IDX; } /** * irdma_hw_stats_init_gen1 - Initialize stat reg table used for gen1 * @vsi: vsi structure where hw_regs are set * * Populate the HW stats table */ static void irdma_hw_stats_init_gen1(struct irdma_sc_vsi *vsi) { struct irdma_sc_dev *dev = vsi->dev; const struct irdma_hw_stat_map *map; u64 *stat_reg = vsi->hw_stats_regs; u64 *regs = dev->hw_stats_regs; u16 i, stats_reg_set = vsi->stats_idx; map = dev->hw_stats_map; /* First 4 stat instances are reserved for port level statistics. */ stats_reg_set += vsi->stats_inst_alloc ? 
IRDMA_FIRST_NON_PF_STAT : 0; for (i = 0; i < dev->hw_attrs.max_stat_idx; i++) { if (map[i].bitmask <= IRDMA_MAX_STATS_32) stat_reg[i] = regs[i] + stats_reg_set * sizeof(u32); else stat_reg[i] = regs[i] + stats_reg_set * sizeof(u64); } } /** * irdma_vsi_stats_init - Initialize the vsi statistics * @vsi: pointer to the vsi structure * @info: The info structure used for initialization */ int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, struct irdma_vsi_stats_info *info) { struct irdma_dma_mem *stats_buff_mem; vsi->pestat = info->pestat; vsi->pestat->hw = vsi->dev->hw; vsi->pestat->vsi = vsi; stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem; stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1); stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device, stats_buff_mem->size, &stats_buff_mem->pa, GFP_KERNEL); if (!stats_buff_mem->va) return -ENOMEM; vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va; vsi->pestat->gather_info.last_gather_stats_va = (void *)((uintptr_t)stats_buff_mem->va + IRDMA_GATHER_STATS_BUF_SIZE); irdma_hw_stats_start_timer(vsi); /* when stat allocation is not required default to fcn_id. */ vsi->stats_idx = info->fcn_id; if (info->alloc_stats_inst) { u8 stats_idx = irdma_get_stats_idx(vsi); if (stats_idx != IRDMA_INVALID_STATS_IDX) { vsi->stats_inst_alloc = true; vsi->stats_idx = stats_idx; vsi->pestat->gather_info.use_stats_inst = true; vsi->pestat->gather_info.stats_inst_index = stats_idx; } } if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) irdma_hw_stats_init_gen1(vsi); return 0; } /** * irdma_vsi_stats_free - Free the vsi stats * @vsi: pointer to the vsi structure */ void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi) { struct irdma_stats_inst_info stats_info = {}; struct irdma_sc_dev *dev = vsi->dev; u8 stats_idx = vsi->stats_idx; if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { if (vsi->stats_inst_alloc) { stats_info.stats_idx = vsi->stats_idx; irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE, &stats_info); } } else { if (vsi->stats_inst_alloc && stats_idx < vsi->dev->hw_attrs.max_stat_inst) vsi->dev->stats_idx_array[stats_idx] = false; } if (!vsi->pestat) return; irdma_hw_stats_stop_timer(vsi); dma_free_coherent(vsi->pestat->hw->device, vsi->pestat->gather_info.stats_buff_mem.size, vsi->pestat->gather_info.stats_buff_mem.va, vsi->pestat->gather_info.stats_buff_mem.pa); vsi->pestat->gather_info.stats_buff_mem.va = NULL; } /** * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size * @wqsize: size of the wq (sq, rq) to encoded_size * @queue_type: queue type selected for the calculation algorithm */ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type) { u8 encoded_size = 0; /* cqp sq's hw coded value starts from 1 for size of 4 * while it starts from 0 for qp' wq's. 
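 *
 * For example, a 256-entry QP work queue becomes encoded_size = 6
 * (256 >> 2 = 64, then six right shifts reach 1, i.e. roughly
 * log2(wqsize / 4)), while the same 256 entries requested for the
 * CQP SQ encode as 7 because the CQP count starts at 1 for a
 * 4-entry queue.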
*/ if (queue_type == IRDMA_QUEUE_TYPE_CQP) encoded_size = 1; wqsize >>= 2; while (wqsize >>= 1) encoded_size++; return encoded_size; } /** * irdma_sc_gather_stats - collect the statistics * @cqp: struct for cqp hw * @info: gather stats info structure * @scratch: u64 saved to be used during cqp completion */ static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp, struct irdma_stats_gather_info *info, u64 scratch) { __le64 *wqe; u64 temp; if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE) return -ENOMEM; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 40, FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index)); set_64bit_val(wqe, 32, info->stats_buff_mem.pa); temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) | FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_inst_index) | FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX, info->use_hmc_fcn_index) | FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, temp); print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_sc_cqp_post_sq(cqp); ibdev_dbg(to_ibdev(cqp->dev), "STATS: CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size); return 0; } /** * irdma_sc_manage_stats_inst - allocate or free stats instance * @cqp: struct for cqp hw * @info: stats info structure * @alloc: alloc vs. delete flag * @scratch: u64 saved to be used during cqp completion */ static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp, struct irdma_stats_inst_info *info, bool alloc, u64 scratch) { __le64 *wqe; u64 temp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 40, FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id)); temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) | FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX, info->use_hmc_fcn_index) | FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) | FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, temp); print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_set_up_map - set the up map table * @cqp: struct for cqp hw * @info: User priority map info * @scratch: u64 saved to be used during cqp completion */ static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp, struct irdma_up_info *info, u64 scratch) { __le64 *wqe; u64 temp = 0; int i; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) temp |= (u64)info->map[i] << (i * 8); set_64bit_val(wqe, 0, temp); set_64bit_val(wqe, 40, FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) | FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx)); temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) | FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE, info->use_cnp_up_override) | FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, temp); print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 
16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_manage_ws_node - create/modify/destroy WS node * @cqp: struct for cqp hw * @info: node info structure * @node_op: 0 for add, 1 for modify, 2 for delete * @scratch: u64 saved to be used during cqp completion */ static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp, struct irdma_ws_node_info *info, enum irdma_ws_node_op node_op, u64 scratch) { __le64 *wqe; u64 temp = 0; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 32, FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) | FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight)); temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) | FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) | FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) | FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) | FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) | FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) | FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) | FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, temp); print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_qp_flush_wqes - flush qp's wqe * @qp: sc qp * @info: flush information * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp, struct irdma_qp_flush_info *info, u64 scratch, bool post_sq) { u64 temp = 0; __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; bool flush_sq = false, flush_rq = false; if (info->rq && !qp->flush_rq) flush_rq = true; if (info->sq && !qp->flush_sq) flush_sq = true; qp->flush_sq |= flush_sq; qp->flush_rq |= flush_rq; if (!flush_sq && !flush_rq) { ibdev_dbg(to_ibdev(qp->dev), "CQP: Additional flush request ignored for qp %x\n", qp->qp_uk.qp_id); return -EALREADY; } cqp = qp->pd->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; if (info->userflushcode) { if (flush_rq) temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR, info->rq_minor_code) | FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR, info->rq_major_code); if (flush_sq) temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR, info->sq_minor_code) | FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR, info->sq_major_code); } set_64bit_val(wqe, 16, temp); temp = (info->generate_ae) ?
info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE, info->ae_src) : 0; set_64bit_val(wqe, 8, temp); hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) | FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) | FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) | FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) | FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP * @qp: sc qp * @info: gen ae information * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_gen_ae(struct irdma_sc_qp *qp, struct irdma_gen_ae_info *info, u64 scratch, bool post_sq) { u64 temp; __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; cqp = qp->pd->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE, info->ae_src); set_64bit_val(wqe, 8, temp); hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_GEN_AE) | FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /*** irdma_sc_qp_upload_context - upload qp's context * @dev: sc device struct * @info: upload context info ptr for return * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev, struct irdma_upload_context_info *info, u64 scratch, bool post_sq) { __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, info->buf_pa); hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) | FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) | FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) | FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_manage_push_page - Handle push page * @cqp: struct for cqp hw * @info: push page info * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp, struct irdma_cqp_manage_push_page_info *info, u64 scratch, bool post_sq) { __le64 *wqe; u64 hdr; if (info->free_page && info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages) return -EINVAL; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, info->qs_handle); hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) | FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, 
IRDMA_CQP_OP_MANAGE_PUSH_PAGES) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_suspend_qp - suspend qp for param change * @cqp: struct for cqp hw * @qp: sc qp struct * @scratch: u64 saved to be used during cqp completion */ static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, u64 scratch) { u64 hdr; __le64 *wqe; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_resume_qp - resume qp after suspend * @cqp: struct for cqp hw * @qp: sc qp struct * @scratch: u64 saved to be used during cqp completion */ static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, u64 scratch) { u64 hdr; __le64 *wqe; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle)); hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_cq_ack - acknowledge completion q * @cq: cq struct */ static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq) { writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db); } /** * irdma_sc_cq_init - initialize completion q * @cq: cq struct * @info: cq initialization info */ int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info) { u32 pble_obj_cnt; pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) return -EINVAL; cq->cq_pa = info->cq_base_pa; cq->dev = info->dev; cq->ceq_id = info->ceq_id; info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db; info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db; irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info); cq->virtual_map = info->virtual_map; cq->pbl_chunk_size = info->pbl_chunk_size; cq->ceqe_mask = info->ceqe_mask; cq->cq_type = (info->type) ? 
info->type : IRDMA_CQ_TYPE_IWARP; cq->shadow_area_pa = info->shadow_area_pa; cq->shadow_read_threshold = info->shadow_read_threshold; cq->ceq_id_valid = info->ceq_id_valid; cq->tph_en = info->tph_en; cq->tph_val = info->tph_val; cq->first_pm_pbl_idx = info->first_pm_pbl_idx; cq->vsi = info->vsi; return 0; } /** * irdma_sc_cq_create - create completion q * @cq: cq struct * @scratch: u64 saved to be used during cqp completion * @check_overflow: flag for overflow check * @post_sq: flag for cqp db to ring */ static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch, bool check_overflow, bool post_sq) { __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; struct irdma_sc_ceq *ceq; int ret_code = 0; cqp = cq->dev->cqp; if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt) return -EINVAL; if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs) return -EINVAL; ceq = cq->dev->ceq[cq->ceq_id]; if (ceq && ceq->reg_cq) ret_code = irdma_sc_add_cq_ctx(ceq, cq); if (ret_code) return ret_code; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) { if (ceq && ceq->reg_cq) irdma_sc_remove_cq_ctx(ceq, cq); return -ENOMEM; } set_64bit_val(wqe, 0, cq->cq_uk.cq_size); set_64bit_val(wqe, 8, (uintptr_t)cq >> 1); set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold)); set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa)); set_64bit_val(wqe, 40, cq->shadow_area_pa); set_64bit_val(wqe, 48, FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0))); set_64bit_val(wqe, 56, FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) | FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx)); hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) | FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0), IRDMA_CQPSQ_CQ_CEQID) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) | FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) | FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) | FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) | FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) | FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) | FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_cq_destroy - destroy completion q * @cq: cq struct * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq) { struct irdma_sc_cqp *cqp; __le64 *wqe; u64 hdr; struct irdma_sc_ceq *ceq; cqp = cq->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; ceq = cq->dev->ceq[cq->ceq_id]; if (ceq && ceq->reg_cq) irdma_sc_remove_cq_ctx(ceq, cq); set_64bit_val(wqe, 0, cq->cq_uk.cq_size); set_64bit_val(wqe, 8, (uintptr_t)cq >> 1); set_64bit_val(wqe, 40, cq->shadow_area_pa); set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); hdr = cq->cq_uk.cq_id | FLD_LS_64(cq->dev, (cq->ceq_id_valid ? 
cq->ceq_id : 0), IRDMA_CQPSQ_CQ_CEQID) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) | FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) | FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) | FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) | FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) | FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_cq_resize - set resized cq buffer info * @cq: resized cq * @info: resized cq buffer info */ void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info) { cq->virtual_map = info->virtual_map; cq->cq_pa = info->cq_pa; cq->first_pm_pbl_idx = info->first_pm_pbl_idx; cq->pbl_chunk_size = info->pbl_chunk_size; irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size); } /** * irdma_sc_cq_modify - modify a Completion Queue * @cq: cq struct * @info: modification info struct * @scratch: u64 saved to be used during cqp completion * @post_sq: flag to post to sq */ static int irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info, u64 scratch, bool post_sq) { struct irdma_sc_cqp *cqp; __le64 *wqe; u64 hdr; u32 pble_obj_cnt; pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; if (info->cq_resize && info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) return -EINVAL; cqp = cq->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 0, info->cq_size); set_64bit_val(wqe, 8, (uintptr_t)cq >> 1); set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold)); set_64bit_val(wqe, 32, info->cq_pa); set_64bit_val(wqe, 40, cq->shadow_area_pa); set_64bit_val(wqe, 48, info->first_pm_pbl_idx); set_64bit_val(wqe, 56, FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) | FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx)); hdr = cq->cq_uk.cq_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) | FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) | FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) | FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) | FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) | FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) | FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_check_cqp_progress - check cqp processing progress * @timeout: timeout info struct * @dev: sc device struct */ void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev) { u64 completed_ops = atomic64_read(&dev->cqp->completed_ops); if (timeout->compl_cqp_cmds != completed_ops) { timeout->compl_cqp_cmds = completed_ops; timeout->count = 0; } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) { timeout->count++; } } /** * irdma_get_cqp_reg_info - get head and tail for cqp using 
registers * @cqp: struct for cqp hw * @val: cqp tail register value * @tail: wqtail register value * @error: cqp processing err */ static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val, u32 *tail, u32 *error) { *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]); *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val); *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val); } /** * irdma_cqp_poll_registers - poll cqp registers * @cqp: struct for cqp hw * @tail: wqtail register value * @count: how many times to try for completion */ static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail, u32 count) { u32 i = 0; u32 newtail, error, val; while (i++ < count) { irdma_get_cqp_reg_info(cqp, &val, &newtail, &error); if (error) { error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); ibdev_dbg(to_ibdev(cqp->dev), "CQP: CQPERRCODES error_code[x%08X]\n", error); return -EIO; } if (newtail != tail) { /* SUCCESS */ IRDMA_RING_MOVE_TAIL(cqp->sq_ring); atomic64_inc(&cqp->completed_ops); return 0; } udelay(cqp->dev->hw_attrs.max_sleep_count); } return -ETIMEDOUT; } /** * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base * @dev: sc device struct * @buf: pointer to commit buffer * @buf_idx: buffer index * @obj_info: object info pointer * @rsrc_idx: indexs of memory resource */ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf, u32 buf_idx, struct irdma_hmc_obj_info *obj_info, u32 rsrc_idx) { u64 temp; get_64bit_val(buf, buf_idx, &temp); switch (rsrc_idx) { case IRDMA_HMC_IW_QP: obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp); break; case IRDMA_HMC_IW_CQ: obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT); break; case IRDMA_HMC_IW_APBVT_ENTRY: obj_info[rsrc_idx].cnt = 1; break; default: obj_info[rsrc_idx].cnt = (u32)temp; break; } obj_info[rsrc_idx].base = (temp >> IRDMA_COMMIT_FPM_BASE_S) * 512; return temp; } /** * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer * @dev: pointer to dev struct * @buf: ptr to fpm commit buffer * @info: ptr to irdma_hmc_obj_info struct * @sd: number of SDs for HMC objects * * parses fpm commit info and copy base value * of hmc objects in hmc_info */ static void irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf, struct irdma_hmc_obj_info *info, u32 *sd) { u64 size; u32 i; u64 max_base = 0; u32 last_hmc_obj = 0; irdma_sc_decode_fpm_commit(dev, buf, 0, info, IRDMA_HMC_IW_QP); irdma_sc_decode_fpm_commit(dev, buf, 8, info, IRDMA_HMC_IW_CQ); /* skiping RSRVD */ irdma_sc_decode_fpm_commit(dev, buf, 24, info, IRDMA_HMC_IW_HTE); irdma_sc_decode_fpm_commit(dev, buf, 32, info, IRDMA_HMC_IW_ARP); irdma_sc_decode_fpm_commit(dev, buf, 40, info, IRDMA_HMC_IW_APBVT_ENTRY); irdma_sc_decode_fpm_commit(dev, buf, 48, info, IRDMA_HMC_IW_MR); irdma_sc_decode_fpm_commit(dev, buf, 56, info, IRDMA_HMC_IW_XF); irdma_sc_decode_fpm_commit(dev, buf, 64, info, IRDMA_HMC_IW_XFFL); irdma_sc_decode_fpm_commit(dev, buf, 72, info, IRDMA_HMC_IW_Q1); irdma_sc_decode_fpm_commit(dev, buf, 80, info, IRDMA_HMC_IW_Q1FL); irdma_sc_decode_fpm_commit(dev, buf, 88, info, IRDMA_HMC_IW_TIMER); irdma_sc_decode_fpm_commit(dev, buf, 112, info, IRDMA_HMC_IW_PBLE); /* skipping RSVD. 
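 * Note: each HMC object above is read from its own 8-byte little-endian
 * slot in the commit buffer, which is why the buf_idx arguments advance
 * in multiples of 8 and reserved slots are simply skipped over.
 * irdma_sc_decode_fpm_commit() splits a slot into a count (low bits,
 * masked per object type) and a base taken from the bits at
 * IRDMA_COMMIT_FPM_BASE_S and above, scaled by 512.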
*/ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) { irdma_sc_decode_fpm_commit(dev, buf, 96, info, IRDMA_HMC_IW_FSIMC); irdma_sc_decode_fpm_commit(dev, buf, 104, info, IRDMA_HMC_IW_FSIAV); irdma_sc_decode_fpm_commit(dev, buf, 128, info, IRDMA_HMC_IW_RRF); irdma_sc_decode_fpm_commit(dev, buf, 136, info, IRDMA_HMC_IW_RRFFL); irdma_sc_decode_fpm_commit(dev, buf, 144, info, IRDMA_HMC_IW_HDR); irdma_sc_decode_fpm_commit(dev, buf, 152, info, IRDMA_HMC_IW_MD); irdma_sc_decode_fpm_commit(dev, buf, 160, info, IRDMA_HMC_IW_OOISC); irdma_sc_decode_fpm_commit(dev, buf, 168, info, IRDMA_HMC_IW_OOISCFFL); } /* searching for the last object in HMC to find the size of the HMC area. */ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) { if (info[i].base > max_base) { max_base = info[i].base; last_hmc_obj = i; } } size = info[last_hmc_obj].cnt * info[last_hmc_obj].size + info[last_hmc_obj].base; if (size & 0x1FFFFF) *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */ else *sd = (u32)(size >> 21); } /** * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size * @buf: ptr to fpm query buffer * @buf_idx: index into buf * @obj_info: ptr to irdma_hmc_obj_info struct * @rsrc_idx: resource index into info * * Decode a 64 bit value from fpm query buffer into max count and size */ static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx, struct irdma_hmc_obj_info *obj_info, u32 rsrc_idx) { u64 temp; u32 size; get_64bit_val(buf, buf_idx, &temp); obj_info[rsrc_idx].max_cnt = (u32)temp; size = (u32)(temp >> 32); obj_info[rsrc_idx].size = BIT_ULL(size); return temp; } /** * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer * @dev: ptr to shared code device * @buf: ptr to fpm query buffer * @hmc_info: ptr to irdma_hmc_obj_info struct * @hmc_fpm_misc: ptr to fpm data * * parses fpm query buffer and copy max_cnt and * size value of hmc objects in hmc_info */ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf, struct irdma_hmc_info *hmc_info, struct irdma_hmc_fpm_misc *hmc_fpm_misc) { struct irdma_hmc_obj_info *obj_info; u64 temp; u32 size; u16 max_pe_sds; obj_info = hmc_info->hmc_obj; get_64bit_val(buf, 0, &temp); hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp); max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp); hmc_fpm_misc->max_sds = max_pe_sds; hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; get_64bit_val(buf, 8, &temp); obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp); size = (u32)(temp >> 32); obj_info[IRDMA_HMC_IW_QP].size = BIT_ULL(size); get_64bit_val(buf, 16, &temp); obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp); size = (u32)(temp >> 32); obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size); irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE); irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP); obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192; obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1; irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR); irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF); get_64bit_val(buf, 64, &temp); obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp; obj_info[IRDMA_HMC_IW_XFFL].size = 4; hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp); if (!hmc_fpm_misc->xf_block_size) return -EINVAL; irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1); get_64bit_val(buf, 80, &temp); obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp; 
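/* Note on the decode above: irdma_sc_decode_fpm_query() splits each
 * 64-bit query slot into max_cnt (low 32 bits) and an object size of
 * BIT_ULL(high 32 bits); with illustrative numbers, a slot value of
 * 0x0000000900020000 decodes to 0x20000 objects of 2^9 = 512 bytes each.
 * Free-list objects such as XFFL and Q1FL are decoded inline instead:
 * the low 32 bits still give max_cnt, the entry size is a fixed 4 bytes,
 * and the per-queue block size comes from a FIELD_GET on the same slot.
 */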
obj_info[IRDMA_HMC_IW_Q1FL].size = 4; hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp); if (!hmc_fpm_misc->q1_block_size) return -EINVAL; irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER); get_64bit_val(buf, 112, &temp); obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp; obj_info[IRDMA_HMC_IW_PBLE].size = 8; get_64bit_val(buf, 120, &temp); hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp); hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp); hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp); if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) return 0; irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC); irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV); irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF); get_64bit_val(buf, 136, &temp); obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp; obj_info[IRDMA_HMC_IW_RRFFL].size = 4; hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp); if (!hmc_fpm_misc->rrf_block_size && obj_info[IRDMA_HMC_IW_RRFFL].max_cnt) return -EINVAL; irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR); irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD); irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC); get_64bit_val(buf, 168, &temp); obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp; obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4; hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp); if (!hmc_fpm_misc->ooiscf_block_size && obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt) return -EINVAL; return 0; } /** * irdma_sc_find_reg_cq - find cq ctx index * @ceq: ceq sc structure * @cq: cq sc structure */ static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq) { u32 i; for (i = 0; i < ceq->reg_cq_size; i++) { if (cq == ceq->reg_cq[i]) return i; } return IRDMA_INVALID_CQ_IDX; } /** * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq * @ceq: ceq sc structure * @cq: cq sc structure */ int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq) { unsigned long flags; spin_lock_irqsave(&ceq->req_cq_lock, flags); if (ceq->reg_cq_size == ceq->elem_cnt) { spin_unlock_irqrestore(&ceq->req_cq_lock, flags); return -ENOMEM; } ceq->reg_cq[ceq->reg_cq_size++] = cq; spin_unlock_irqrestore(&ceq->req_cq_lock, flags); return 0; } /** * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq * @ceq: ceq sc structure * @cq: cq sc structure */ void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq) { unsigned long flags; u32 cq_ctx_idx; spin_lock_irqsave(&ceq->req_cq_lock, flags); cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq); if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX) goto exit; ceq->reg_cq_size--; if (cq_ctx_idx != ceq->reg_cq_size) ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size]; ceq->reg_cq[ceq->reg_cq_size] = NULL; exit: spin_unlock_irqrestore(&ceq->req_cq_lock, flags); } /** * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair * @cqp: IWARP control queue pair pointer * @info: IWARP control queue pair init info pointer * * Initializes the object and context buffers for a control Queue Pair. 
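 *
 * Note that info->sq_size must be a power of two within
 * [IRDMA_CQP_SW_SQSIZE_4, IRDMA_CQP_SW_SQSIZE_2048]; anything else is
 * rejected with -EINVAL. The accepted size is converted to the hardware
 * encoding with irdma_get_encoded_wqe_size(info->sq_size,
 * IRDMA_QUEUE_TYPE_CQP), e.g. a 1024-entry CQP SQ is stored as
 * hw_sq_size = 9.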
*/ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, struct irdma_cqp_init_info *info) { u8 hw_sq_size; if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 || info->sq_size < IRDMA_CQP_SW_SQSIZE_4 || ((info->sq_size & (info->sq_size - 1)))) return -EINVAL; hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size, IRDMA_QUEUE_TYPE_CQP); cqp->size = sizeof(*cqp); cqp->sq_size = info->sq_size; cqp->hw_sq_size = hw_sq_size; cqp->sq_base = info->sq; cqp->host_ctx = info->host_ctx; cqp->sq_pa = info->sq_pa; cqp->host_ctx_pa = info->host_ctx_pa; cqp->dev = info->dev; cqp->struct_ver = info->struct_ver; cqp->hw_maj_ver = info->hw_maj_ver; cqp->hw_min_ver = info->hw_min_ver; cqp->scratch_array = info->scratch_array; cqp->polarity = 0; cqp->en_datacenter_tcp = info->en_datacenter_tcp; cqp->ena_vf_count = info->ena_vf_count; cqp->hmc_profile = info->hmc_profile; cqp->ceqs_per_vf = info->ceqs_per_vf; cqp->disable_packed = info->disable_packed; cqp->rocev2_rto_policy = info->rocev2_rto_policy; cqp->protocol_used = info->protocol_used; memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params)); info->dev->cqp = cqp; IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size); cqp->requested_ops = 0; atomic64_set(&cqp->completed_ops, 0); /* for the cqp commands backlog. */ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]); writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]); writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); ibdev_dbg(to_ibdev(cqp->dev), "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n", cqp->sq_size, cqp->hw_sq_size, cqp->sq_base, (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity); return 0; } /** * irdma_sc_cqp_create - create cqp during bringup * @cqp: struct for cqp hw * @maj_err: If error, major err number * @min_err: If error, minor err number */ int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err) { u64 temp; u8 hw_rev; u32 cnt = 0, p1, p2, val = 0, err_code; int ret_code; hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev; cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size, IRDMA_SD_BUF_ALIGNMENT); cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device, cqp->sdbuf.size, &cqp->sdbuf.pa, GFP_KERNEL); if (!cqp->sdbuf.va) return -ENOMEM; spin_lock_init(&cqp->dev->cqp_lock); temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) | FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) | FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) | FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf); if (hw_rev >= IRDMA_GEN_2) { temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY, cqp->rocev2_rto_policy) | FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED, cqp->protocol_used); } set_64bit_val(cqp->host_ctx, 0, temp); set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa); temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) | FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile); set_64bit_val(cqp->host_ctx, 16, temp); set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp); temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) | FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver); if (hw_rev >= IRDMA_GEN_2) { temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) | FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor); } set_64bit_val(cqp->host_ctx, 32, temp); set_64bit_val(cqp->host_ctx, 40, 0); temp = 0; if (hw_rev >= IRDMA_GEN_2) { temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) | FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) | FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, 
cqp->dcqcn_params.hai_factor); } set_64bit_val(cqp->host_ctx, 48, temp); temp = 0; if (hw_rev >= IRDMA_GEN_2) { temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) | FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) | FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) | FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod); } set_64bit_val(cqp->host_ctx, 56, temp); print_hex_dump_debug("WQE: CQP_HOST_CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false); p1 = cqp->host_ctx_pa >> 32; p2 = (u32)cqp->host_ctx_pa; writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]); writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]); do { if (cnt++ > cqp->dev->hw_attrs.max_done_count) { ret_code = -ETIMEDOUT; goto err; } udelay(cqp->dev->hw_attrs.max_sleep_count); val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); } while (!val); if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) { ret_code = -EOPNOTSUPP; goto err; } cqp->process_cqp_sds = irdma_update_sds_noccq; return 0; err: dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size, cqp->sdbuf.va, cqp->sdbuf.pa); cqp->sdbuf.va = NULL; err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code); *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code); return ret_code; } /** * irdma_sc_cqp_post_sq - post of cqp's sq * @cqp: struct for cqp hw */ void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp) { writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db); ibdev_dbg(to_ibdev(cqp->dev), "WQE: CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size); } /** * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq * and pass back index * @cqp: CQP HW structure * @scratch: private data for CQP WQE * @wqe_idx: WQE index of CQP SQ */ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch, u32 *wqe_idx) { __le64 *wqe = NULL; int ret_code; if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) { ibdev_dbg(to_ibdev(cqp->dev), "WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size); return NULL; } IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code); if (ret_code) return NULL; cqp->requested_ops++; if (!*wqe_idx) cqp->polarity = !cqp->polarity; wqe = cqp->sq_base[*wqe_idx].elem; cqp->scratch_array[*wqe_idx] = scratch; IRDMA_CQP_INIT_WQE(wqe); return wqe; } /** * irdma_sc_cqp_destroy - destroy cqp during close * @cqp: struct for cqp hw */ int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp) { u32 cnt = 0, val; int ret_code = 0; writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]); writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]); do { if (cnt++ > cqp->dev->hw_attrs.max_done_count) { ret_code = -ETIMEDOUT; break; } udelay(cqp->dev->hw_attrs.max_sleep_count); val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE)); dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size, cqp->sdbuf.va, cqp->sdbuf.pa); cqp->sdbuf.va = NULL; return ret_code; } /** * irdma_sc_ccq_arm - enable intr for control cq * @ccq: ccq sc struct */ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq) { u64 temp_val; u16 sw_cq_sel; u8 arm_next_se; u8 arm_seq_num; get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val); sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val); arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val); arm_seq_num = 
(u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val); arm_seq_num++; temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1); set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val); dma_wmb(); /* make sure shadow area is updated before arming */ writel(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db); } /** * irdma_sc_ccq_get_cqe_info - get ccq's cq entry * @ccq: ccq sc struct * @info: completion q entry to return */ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq, struct irdma_ccq_cqe_info *info) { u64 qp_ctx, temp, temp1; __le64 *cqe; struct irdma_sc_cqp *cqp; u32 wqe_idx; u32 error; u8 polarity; int ret_code = 0; if (ccq->cq_uk.avoid_mem_cflct) cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk); else cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk); get_64bit_val(cqe, 24, &temp); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp); if (polarity != ccq->cq_uk.polarity) return -ENOENT; /* Ensure CEQE contents are read after valid bit is checked */ dma_rmb(); get_64bit_val(cqe, 8, &qp_ctx); cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx; info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp); info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR; info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp); if (info->error) { info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp); error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); ibdev_dbg(to_ibdev(cqp->dev), "CQP: CQPERRCODES error_code[x%08X]\n", error); } wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp); info->scratch = cqp->scratch_array[wqe_idx]; get_64bit_val(cqe, 16, &temp1); info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1); get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1); info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1); info->cqp = cqp; /* move the head for cq */ IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code); if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)) ccq->cq_uk.polarity ^= 1; /* update cq tail in cq shadow memory also */ IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring); set_64bit_val(ccq->cq_uk.shadow_area, 0, IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)); dma_wmb(); /* make sure shadow area is updated before moving tail */ IRDMA_RING_MOVE_TAIL(cqp->sq_ring); atomic64_inc(&cqp->completed_ops); return ret_code; } /** * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ * @cqp: struct for cqp hw * @op_code: cqp opcode for completion * @compl_info: completion q entry to return */ int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code, struct irdma_ccq_cqe_info *compl_info) { struct irdma_ccq_cqe_info info = {}; struct irdma_sc_cq *ccq; int ret_code = 0; u32 cnt = 0; ccq = cqp->dev->ccq; while (1) { if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count) return -ETIMEDOUT; if (irdma_sc_ccq_get_cqe_info(ccq, &info)) { udelay(cqp->dev->hw_attrs.max_sleep_count); continue; } if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) { ret_code = -EIO; break; } /* make sure op code matches*/ if (op_code == info.op_code) break; ibdev_dbg(to_ibdev(cqp->dev), "WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n", op_code, info.op_code); } if (compl_info) memcpy(compl_info, &info, sizeof(*compl_info)); return ret_code; } /** * irdma_sc_manage_hmc_pm_func_table - manage of function table * @cqp: struct for cqp hw * @scratch: u64 saved to be used during cqp completion * @info: info for the manage function table operation * @post_sq: flag 
for cqp db to ring */ static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp, struct irdma_hmc_fcn_info *info, u64 scratch, bool post_sq) { __le64 *wqe; u64 hdr; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 0, 0); set_64bit_val(wqe, 8, 0); set_64bit_val(wqe, 16, 0); set_64bit_val(wqe, 32, 0); set_64bit_val(wqe, 40, 0); set_64bit_val(wqe, 48, 0); set_64bit_val(wqe, 56, 0); hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) | FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_commit_fpm_val_done - wait for cqp eqe completion * for fpm commit * @cqp: struct for cqp hw */ static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp) { return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL, NULL); } /** * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values * @cqp: struct for cqp hw * @scratch: u64 saved to be used during cqp completion * @hmc_fn_id: hmc function id * @commit_fpm_mem: Memory for fpm values * @post_sq: flag for cqp db to ring * @wait_type: poll ccq or cqp registers for cqp completion */ static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id, struct irdma_dma_mem *commit_fpm_mem, bool post_sq, u8 wait_type) { __le64 *wqe; u64 hdr; u32 tail, val, error; int ret_code = 0; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, hmc_fn_id); set_64bit_val(wqe, 32, commit_fpm_mem->pa); hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: COMMIT_FPM_VAL WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_get_cqp_reg_info(cqp, &val, &tail, &error); if (post_sq) { irdma_sc_cqp_post_sq(cqp); if (wait_type == IRDMA_CQP_WAIT_POLL_REGS) ret_code = irdma_cqp_poll_registers(cqp, tail, cqp->dev->hw_attrs.max_done_count); else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ) ret_code = irdma_sc_commit_fpm_val_done(cqp); } return ret_code; } /** * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for * query fpm * @cqp: struct for cqp hw */ static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp) { return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL, NULL); } /** * irdma_sc_query_fpm_val - cqp wqe query fpm values * @cqp: struct for cqp hw * @scratch: u64 saved to be used during cqp completion * @hmc_fn_id: hmc function id * @query_fpm_mem: memory for return fpm values * @post_sq: flag for cqp db to ring * @wait_type: poll ccq or cqp registers for cqp completion */ static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id, struct irdma_dma_mem *query_fpm_mem, bool post_sq, u8 wait_type) { __le64 *wqe; u64 hdr; u32 tail, val, error; int ret_code = 0; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, hmc_fn_id); set_64bit_val(wqe, 32, query_fpm_mem->pa); hdr = 
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_get_cqp_reg_info(cqp, &val, &tail, &error); if (post_sq) { irdma_sc_cqp_post_sq(cqp); if (wait_type == IRDMA_CQP_WAIT_POLL_REGS) ret_code = irdma_cqp_poll_registers(cqp, tail, cqp->dev->hw_attrs.max_done_count); else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ) ret_code = irdma_sc_query_fpm_val_done(cqp); } return ret_code; } /** * irdma_sc_ceq_init - initialize ceq * @ceq: ceq sc structure * @info: ceq initialization info */ int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq, struct irdma_ceq_init_info *info) { u32 pble_obj_cnt; if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size || info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size) return -EINVAL; if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs) return -EINVAL; pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) return -EINVAL; ceq->size = sizeof(*ceq); ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base; ceq->ceq_id = info->ceq_id; ceq->dev = info->dev; ceq->elem_cnt = info->elem_cnt; ceq->ceq_elem_pa = info->ceqe_pa; ceq->virtual_map = info->virtual_map; ceq->itr_no_expire = info->itr_no_expire; ceq->reg_cq = info->reg_cq; ceq->reg_cq_size = 0; spin_lock_init(&ceq->req_cq_lock); ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0); ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0); ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL); ceq->tph_en = info->tph_en; ceq->tph_val = info->tph_val; ceq->vsi = info->vsi; ceq->polarity = 1; IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt); ceq->dev->ceq[info->ceq_id] = ceq; return 0; } /** * irdma_sc_ceq_create - create ceq wqe * @ceq: ceq sc structure * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq) { struct irdma_sc_cqp *cqp; __le64 *wqe; u64 hdr; cqp = ceq->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, ceq->elem_cnt); set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa)); set_64bit_val(wqe, 48, (ceq->virtual_map ? 
ceq->first_pm_pbl_idx : 0)); set_64bit_val(wqe, 56, FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) | FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx)); hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) | FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) | FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) | FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) | FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_cceq_create_done - poll for control ceq wqe to complete * @ceq: ceq sc structure */ static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq) { struct irdma_sc_cqp *cqp; cqp = ceq->dev->cqp; return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ, NULL); } /** * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete * @ceq: ceq sc structure */ int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq) { struct irdma_sc_cqp *cqp; if (ceq->reg_cq) irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq); cqp = ceq->dev->cqp; cqp->process_cqp_sds = irdma_update_sds_noccq; return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ, NULL); } /** * irdma_sc_cceq_create - create cceq * @ceq: ceq sc structure * @scratch: u64 saved to be used during cqp completion */ int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch) { int ret_code; struct irdma_sc_dev *dev = ceq->dev; dev->ccq->vsi = ceq->vsi; if (ceq->reg_cq) { ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq); if (ret_code) return ret_code; } ret_code = irdma_sc_ceq_create(ceq, scratch, true); if (!ret_code) return irdma_sc_cceq_create_done(ceq); return ret_code; } /** * irdma_sc_ceq_destroy - destroy ceq * @ceq: ceq sc structure * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq) { struct irdma_sc_cqp *cqp; __le64 *wqe; u64 hdr; cqp = ceq->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, ceq->elem_cnt); set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx); hdr = ceq->ceq_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) | FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) | FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) | FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_process_ceq - process ceq * @dev: sc device struct * @ceq: ceq sc structure * * It is expected caller serializes this function with cleanup_ceqes() * because these functions manipulate the same ceq */ void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq) { u64 temp; __le64 *ceqe; struct irdma_sc_cq *cq = NULL; struct irdma_sc_cq *temp_cq; u8 polarity; u32 cq_idx; unsigned long flags; do { cq_idx = 0; ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq); get_64bit_val(ceqe, 0, &temp); polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp); if (polarity != 
ceq->polarity) return NULL; temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1); if (!temp_cq) { cq_idx = IRDMA_INVALID_CQ_IDX; IRDMA_RING_MOVE_TAIL(ceq->ceq_ring); if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring)) ceq->polarity ^= 1; continue; } cq = temp_cq; if (ceq->reg_cq) { spin_lock_irqsave(&ceq->req_cq_lock, flags); cq_idx = irdma_sc_find_reg_cq(ceq, cq); spin_unlock_irqrestore(&ceq->req_cq_lock, flags); } IRDMA_RING_MOVE_TAIL(ceq->ceq_ring); if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring)) ceq->polarity ^= 1; } while (cq_idx == IRDMA_INVALID_CQ_IDX); if (cq) irdma_sc_cq_ack(cq); return cq; } /** * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq * @cq: cq for which the ceqes need to be cleaned up * @ceq: ceq ptr * * The function is called after the cq is destroyed to cleanup * its pending ceqe entries. It is expected caller serializes this * function with process_ceq() in interrupt context. */ void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq) { struct irdma_sc_cq *next_cq; u8 ceq_polarity = ceq->polarity; __le64 *ceqe; u8 polarity; u64 temp; int next; u32 i; next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0); for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) { ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next); get_64bit_val(ceqe, 0, &temp); polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp); if (polarity != ceq_polarity) return; next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1); if (cq == next_cq) set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID); next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i); if (!next) ceq_polarity ^= 1; } } /** * irdma_sc_aeq_init - initialize aeq * @aeq: aeq structure ptr * @info: aeq initialization info */ int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq, struct irdma_aeq_init_info *info) { u32 pble_obj_cnt; if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size || info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size) return -EINVAL; pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) return -EINVAL; aeq->size = sizeof(*aeq); aeq->polarity = 1; aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base; aeq->dev = info->dev; aeq->elem_cnt = info->elem_cnt; aeq->aeq_elem_pa = info->aeq_elem_pa; IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt); aeq->virtual_map = info->virtual_map; aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL); aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0); aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0); aeq->msix_idx = info->msix_idx; info->dev->aeq = aeq; return 0; } /** * irdma_sc_aeq_create - create aeq * @aeq: aeq structure ptr * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq) { __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; cqp = aeq->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, aeq->elem_cnt); set_64bit_val(wqe, 32, (aeq->virtual_map ? 0 : aeq->aeq_elem_pa)); set_64bit_val(wqe, 48, (aeq->virtual_map ? 
aeq->first_pm_pbl_idx : 0)); hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) | FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) | FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_aeq_destroy - destroy aeq during close * @aeq: aeq structure ptr * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq) { __le64 *wqe; struct irdma_sc_cqp *cqp; struct irdma_sc_dev *dev; u64 hdr; dev = aeq->dev; writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]); cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, aeq->elem_cnt); set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx); hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) | FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) | FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); if (post_sq) irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_sc_get_next_aeqe - get next aeq entry * @aeq: aeq structure ptr * @info: aeqe info to be returned */ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq, struct irdma_aeqe_info *info) { u64 temp, compl_ctx; __le64 *aeqe; u8 ae_src; u8 polarity; aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq); get_64bit_val(aeqe, 8, &temp); polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp); if (aeq->polarity != polarity) return -ENOENT; /* Ensure AEQE contents are read after valid bit is checked */ dma_rmb(); get_64bit_val(aeqe, 0, &compl_ctx); print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8, aeqe, 16, false); ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp); info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp); info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) | ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18); info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp); info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp); info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp); info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp); info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp); info->ae_src = ae_src; switch (info->ae_id) { case IRDMA_AE_PRIV_OPERATION_DENIED: case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW: case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW: case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG: case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: case IRDMA_AE_UDA_XMIT_BAD_PD: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: case IRDMA_AE_BAD_CLOSE: case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO: case IRDMA_AE_STAG_ZERO_INVALID: case IRDMA_AE_IB_RREQ_AND_Q1_FULL: case IRDMA_AE_IB_INVALID_REQUEST: case IRDMA_AE_WQE_UNEXPECTED_OPCODE: case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: case IRDMA_AE_IB_REMOTE_OP_ERROR: case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: case IRDMA_AE_DDP_UBE_INVALID_MO: case IRDMA_AE_DDP_UBE_INVALID_QN: case IRDMA_AE_DDP_NO_L_BIT: case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: case 
IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE: case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST: case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: case IRDMA_AE_INVALID_ARP_ENTRY: case IRDMA_AE_INVALID_TCP_OPTION_RCVD: case IRDMA_AE_STALE_ARP_ENTRY: case IRDMA_AE_INVALID_AH_ENTRY: case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: case IRDMA_AE_LLP_TOO_MANY_RETRIES: case IRDMA_AE_LLP_DOUBT_REACHABILITY: case IRDMA_AE_LLP_CONNECTION_ESTABLISHED: case IRDMA_AE_RESET_SENT: case IRDMA_AE_TERMINATE_SENT: case IRDMA_AE_RESET_NOT_SENT: case IRDMA_AE_LCE_QP_CATASTROPHIC: case IRDMA_AE_QP_SUSPEND_COMPLETE: case IRDMA_AE_UDA_L4LEN_INVALID: info->qp = true; info->compl_ctx = compl_ctx; break; case IRDMA_AE_LCE_CQ_CATASTROPHIC: info->cq = true; info->compl_ctx = compl_ctx << 1; ae_src = IRDMA_AE_SOURCE_RSVD; break; case IRDMA_AE_ROCE_EMPTY_MCG: case IRDMA_AE_ROCE_BAD_MC_IP_ADDR: case IRDMA_AE_ROCE_BAD_MC_QPID: case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH: fallthrough; case IRDMA_AE_LLP_CONNECTION_RESET: case IRDMA_AE_LLP_SYN_RECEIVED: case IRDMA_AE_LLP_FIN_RECEIVED: case IRDMA_AE_LLP_CLOSE_COMPLETE: case IRDMA_AE_LLP_TERMINATE_RECEIVED: case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE: ae_src = IRDMA_AE_SOURCE_RSVD; info->qp = true; info->compl_ctx = compl_ctx; break; default: break; } switch (ae_src) { case IRDMA_AE_SOURCE_RQ: case IRDMA_AE_SOURCE_RQ_0011: info->qp = true; info->rq = true; info->compl_ctx = compl_ctx; break; case IRDMA_AE_SOURCE_CQ: case IRDMA_AE_SOURCE_CQ_0110: case IRDMA_AE_SOURCE_CQ_1010: case IRDMA_AE_SOURCE_CQ_1110: info->cq = true; info->compl_ctx = compl_ctx << 1; break; case IRDMA_AE_SOURCE_SQ: case IRDMA_AE_SOURCE_SQ_0111: info->qp = true; info->sq = true; info->compl_ctx = compl_ctx; break; case IRDMA_AE_SOURCE_IN_RR_WR: case IRDMA_AE_SOURCE_IN_RR_WR_1011: info->qp = true; info->compl_ctx = compl_ctx; info->in_rdrsp_wr = true; break; case IRDMA_AE_SOURCE_OUT_RR: case IRDMA_AE_SOURCE_OUT_RR_1111: info->qp = true; info->compl_ctx = compl_ctx; info->out_rdrsp = true; break; case IRDMA_AE_SOURCE_RSVD: default: break; } IRDMA_RING_MOVE_TAIL(aeq->aeq_ring); if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring)) aeq->polarity ^= 1; return 0; } /** * irdma_sc_repost_aeq_entries - repost completed aeq entries * @dev: sc device struct * @count: allocate count */ void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count) { writel(count, dev->hw_regs[IRDMA_AEQALLOC]); } /** * irdma_sc_ccq_init - initialize control cq * @cq: sc's cq ctruct * @info: info for control cq initialization */ int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info) { u32 pble_obj_cnt; if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size || info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size) return -EINVAL; if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs) return -EINVAL; pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) return -EINVAL; cq->cq_pa = info->cq_pa; cq->cq_uk.cq_base = info->cq_base; cq->shadow_area_pa = info->shadow_area_pa; cq->cq_uk.shadow_area = info->shadow_area; cq->shadow_read_threshold = info->shadow_read_threshold; cq->dev = info->dev; cq->ceq_id = info->ceq_id; cq->cq_uk.cq_size = info->num_elem; cq->cq_type = IRDMA_CQ_TYPE_CQP; cq->ceqe_mask = info->ceqe_mask; IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem); cq->cq_uk.cq_id = 0; /* control cq is id 0 always */ cq->ceq_id_valid = info->ceq_id_valid; cq->tph_en = 
info->tph_en; cq->tph_val = info->tph_val; cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct; cq->pbl_list = info->pbl_list; cq->virtual_map = info->virtual_map; cq->pbl_chunk_size = info->pbl_chunk_size; cq->first_pm_pbl_idx = info->first_pm_pbl_idx; cq->cq_uk.polarity = true; cq->vsi = info->vsi; cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db; /* Only applicable to CQs other than CCQ so initialize to zero */ cq->cq_uk.cqe_alloc_db = NULL; info->dev->ccq = cq; return 0; } /** * irdma_sc_ccq_create_done - poll cqp for ccq create * @ccq: ccq sc struct */ static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq) { struct irdma_sc_cqp *cqp; cqp = ccq->dev->cqp; return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL); } /** * irdma_sc_ccq_create - create control cq * @ccq: ccq sc struct * @scratch: u64 saved to be used during cqp completion * @check_overflow: overlow flag for ccq * @post_sq: flag for cqp db to ring */ int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch, bool check_overflow, bool post_sq) { int ret_code; ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq); if (ret_code) return ret_code; if (post_sq) { ret_code = irdma_sc_ccq_create_done(ccq); if (ret_code) return ret_code; } ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd; return 0; } /** * irdma_sc_ccq_destroy - destroy ccq during close * @ccq: ccq sc struct * @scratch: u64 saved to be used during cqp completion * @post_sq: flag for cqp db to ring */ int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq) { struct irdma_sc_cqp *cqp; __le64 *wqe; u64 hdr; int ret_code = 0; u32 tail, val, error; cqp = ccq->dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 0, ccq->cq_uk.cq_size); set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1); set_64bit_val(wqe, 40, ccq->shadow_area_pa); hdr = ccq->cq_uk.cq_id | FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? 
ccq->ceq_id : 0), IRDMA_CQPSQ_CQ_CEQID) | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) | FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) | FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) | FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) | FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_get_cqp_reg_info(cqp, &val, &tail, &error); if (post_sq) { irdma_sc_cqp_post_sq(cqp); ret_code = irdma_cqp_poll_registers(cqp, tail, cqp->dev->hw_attrs.max_done_count); } cqp->process_cqp_sds = irdma_update_sds_noccq; return ret_code; } /** * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info * @dev : ptr to irdma_dev struct * @hmc_fn_id: hmc function id */ int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id) { struct irdma_hmc_info *hmc_info; struct irdma_hmc_fpm_misc *hmc_fpm_misc; struct irdma_dma_mem query_fpm_mem; int ret_code = 0; u8 wait_type; hmc_info = dev->hmc_info; hmc_fpm_misc = &dev->hmc_fpm_misc; query_fpm_mem.pa = dev->fpm_query_buf_pa; query_fpm_mem.va = dev->fpm_query_buf; hmc_info->hmc_fn_id = hmc_fn_id; wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS; ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id, &query_fpm_mem, true, wait_type); if (ret_code) return ret_code; /* parse the fpm_query_buf and fill hmc obj info */ ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info, hmc_fpm_misc); print_hex_dump_debug("HMC: QUERY FPM BUFFER", DUMP_PREFIX_OFFSET, 16, 8, query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE, false); return ret_code; } /** * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp * command and populates fpm base address in hmc_info * @dev : ptr to irdma_dev struct * @hmc_fn_id: hmc function id */ static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id) { struct irdma_hmc_info *hmc_info; struct irdma_hmc_obj_info *obj_info; __le64 *buf; struct irdma_dma_mem commit_fpm_mem; int ret_code = 0; u8 wait_type; hmc_info = dev->hmc_info; obj_info = hmc_info->hmc_obj; buf = dev->fpm_commit_buf; set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt); set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt); set_64bit_val(buf, 16, (u64)0); /* RSRVD */ set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt); set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt); set_64bit_val(buf, 40, (u64)0); /* RSVD */ set_64bit_val(buf, 48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt); set_64bit_val(buf, 56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt); set_64bit_val(buf, 64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt); set_64bit_val(buf, 72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt); set_64bit_val(buf, 80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt); set_64bit_val(buf, 88, (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt); set_64bit_val(buf, 96, (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt); set_64bit_val(buf, 104, (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt); set_64bit_val(buf, 112, (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt); set_64bit_val(buf, 120, (u64)0); /* RSVD */ set_64bit_val(buf, 128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt); set_64bit_val(buf, 136, (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt); set_64bit_val(buf, 144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt); set_64bit_val(buf, 152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt); set_64bit_val(buf, 160, (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt); 
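	/* The set_64bit_val() calls above and below fill the commit-FPM
	 * buffer: one 8-byte slot per HMC object count at offsets 0..168,
	 * with the RSVD slots written as zero. The populated buffer is then
	 * handed to the CQP through irdma_sc_commit_fpm_val() below.
	 */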
set_64bit_val(buf, 168, (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt); commit_fpm_mem.pa = dev->fpm_commit_buf_pa; commit_fpm_mem.va = dev->fpm_commit_buf; wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS; print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16, 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE, false); ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id, &commit_fpm_mem, true, wait_type); if (!ret_code) irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt); print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16, 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE, false); return ret_code; } /** * cqp_sds_wqe_fill - fill cqp wqe doe sd * @cqp: struct for cqp hw * @info: sd info for wqe * @scratch: u64 saved to be used during cqp completion */ static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info, u64 scratch) { u64 data; u64 hdr; __le64 *wqe; int mem_entries, wqe_entries; struct irdma_dma_mem *sdbuf = &cqp->sdbuf; u64 offset = 0; u32 wqe_idx; wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); if (!wqe) return -ENOMEM; wqe_entries = (info->cnt > 3) ? 3 : info->cnt; mem_entries = info->cnt - wqe_entries; if (mem_entries) { offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE; memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4); data = (u64)sdbuf->pa + offset; } else { data = 0; } data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id); set_64bit_val(wqe, 16, data); switch (wqe_entries) { case 3: set_64bit_val(wqe, 48, (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) | FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1))); set_64bit_val(wqe, 56, info->entry[2].data); fallthrough; case 2: set_64bit_val(wqe, 32, (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) | FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1))); set_64bit_val(wqe, 40, info->entry[1].data); fallthrough; case 1: set_64bit_val(wqe, 0, FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd)); set_64bit_val(wqe, 8, info->entry[0].data); break; default: break; } hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (mem_entries) print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE Buffer", DUMP_PREFIX_OFFSET, 16, 8, (char *)sdbuf->va + offset, mem_entries << 4, false); print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); return 0; } /** * irdma_update_pe_sds - cqp wqe for sd * @dev: ptr to irdma_dev struct * @info: sd info for sd's * @scratch: u64 saved to be used during cqp completion */ static int irdma_update_pe_sds(struct irdma_sc_dev *dev, struct irdma_update_sds_info *info, u64 scratch) { struct irdma_sc_cqp *cqp = dev->cqp; int ret_code; ret_code = cqp_sds_wqe_fill(cqp, info, scratch); if (!ret_code) irdma_sc_cqp_post_sq(cqp); return ret_code; } /** * irdma_update_sds_noccq - update sd before ccq created * @dev: sc device struct * @info: sd info for sd's */ int irdma_update_sds_noccq(struct irdma_sc_dev *dev, struct irdma_update_sds_info *info) { u32 error, val, tail; struct irdma_sc_cqp *cqp = dev->cqp; int ret_code; ret_code = cqp_sds_wqe_fill(cqp, info, 0); if (ret_code) return ret_code; irdma_get_cqp_reg_info(cqp, &val, &tail, &error); irdma_sc_cqp_post_sq(cqp); return 
irdma_cqp_poll_registers(cqp, tail, cqp->dev->hw_attrs.max_done_count); } /** * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages * @cqp: struct for cqp hw * @scratch: u64 saved to be used during cqp completion * @hmc_fn_id: hmc function id * @post_sq: flag for cqp db to ring * @poll_registers: flag to poll register for cqp completion */ int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id, bool post_sq, bool poll_registers) { u64 hdr; __le64 *wqe; u32 tail, val, error; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id)); hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("WQE: SHMC_PAGES_ALLOCATED WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_get_cqp_reg_info(cqp, &val, &tail, &error); if (post_sq) { irdma_sc_cqp_post_sq(cqp); if (poll_registers) /* check for cqp sq tail update */ return irdma_cqp_poll_registers(cqp, tail, cqp->dev->hw_attrs.max_done_count); else return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED, NULL); } return 0; } /** * irdma_cqp_ring_full - check if cqp ring is full * @cqp: struct for cqp hw */ static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp) { return IRDMA_RING_FULL_ERR(cqp->sq_ring); } /** * irdma_est_sd - returns approximate number of SDs for HMC * @dev: sc device struct * @hmc_info: hmc structure, size and count for HMC objects */ static u32 irdma_est_sd(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info) { int i; u64 size = 0; u64 sd; for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) if (i != IRDMA_HMC_IW_PBLE) size += round_up(hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size, 512); size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512); if (size & 0x1FFFFF) sd = (size >> 21) + 1; /* add 1 for remainder */ else sd = size >> 21; if (sd > 0xFFFFFFFF) { ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd); sd = 0xFFFFFFFF - 1; } return (u32)sd; } /** * irdma_sc_query_rdma_features_done - poll cqp for query features done * @cqp: struct for cqp hw */ static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp) { return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_RDMA_FEATURES, NULL); } /** * irdma_sc_query_rdma_features - query RDMA features and FW ver * @cqp: struct for cqp hw * @buf: buffer to hold query info * @scratch: u64 saved to be used during cqp completion */ static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp, struct irdma_dma_mem *buf, u64 scratch) { __le64 *wqe; u64 temp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) return -ENOMEM; temp = buf->pa; set_64bit_val(wqe, 32, temp); temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID, cqp->polarity) | FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) | FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, temp); print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_sc_cqp_post_sq(cqp); return 0; } /** * irdma_get_rdma_features - get RDMA features * @dev: sc device struct */ int 
irdma_get_rdma_features(struct irdma_sc_dev *dev) { int ret_code; struct irdma_dma_mem feat_buf; u64 temp; u16 byte_idx, feat_type, feat_cnt, feat_idx; feat_buf.size = ALIGN(IRDMA_FEATURE_BUF_SIZE, IRDMA_FEATURE_BUF_ALIGNMENT); feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size, &feat_buf.pa, GFP_KERNEL); if (!feat_buf.va) return -ENOMEM; ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0); if (!ret_code) ret_code = irdma_sc_query_rdma_features_done(dev->cqp); if (ret_code) goto exit; get_64bit_val(feat_buf.va, 0, &temp); feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp); if (feat_cnt < 2) { ret_code = -EINVAL; goto exit; } else if (feat_cnt > IRDMA_MAX_FEATURES) { ibdev_dbg(to_ibdev(dev), "DEV: feature buf size insufficient, retrying with larger buffer\n"); dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va, feat_buf.pa); feat_buf.va = NULL; feat_buf.size = ALIGN(8 * feat_cnt, IRDMA_FEATURE_BUF_ALIGNMENT); feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size, &feat_buf.pa, GFP_KERNEL); if (!feat_buf.va) return -ENOMEM; ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0); if (!ret_code) ret_code = irdma_sc_query_rdma_features_done(dev->cqp); if (ret_code) goto exit; get_64bit_val(feat_buf.va, 0, &temp); feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp); if (feat_cnt < 2) { ret_code = -EINVAL; goto exit; } } print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET, 16, 8, feat_buf.va, feat_cnt * 8, false); for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES); feat_idx++, byte_idx += 8) { get_64bit_val(feat_buf.va, byte_idx, &temp); feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp); if (feat_type >= IRDMA_MAX_FEATURES) { ibdev_dbg(to_ibdev(dev), "DEV: found unrecognized feature type %d\n", feat_type); continue; } dev->feature_info[feat_type] = temp; } exit: dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va, feat_buf.pa); feat_buf.va = NULL; return ret_code; } static u32 irdma_q1_cnt(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info, u32 qpwanted) { u32 q1_cnt; if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) { q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted); } else { if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY) q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512); else q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted; } return q1_cnt; } static void cfg_fpm_value_gen_1(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info, u32 qpwanted) { hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes); } static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info, u32 qpwanted) { struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc; hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = 4 * hmc_fpm_misc->xf_block_size * qpwanted; hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted; if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt) hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted; if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt) hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt / hmc_fpm_misc->rrf_block_size; if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt) hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted; if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt) hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt / hmc_fpm_misc->ooiscf_block_size; } /** * irdma_cfg_fpm_val - 
configure HMC objects * @dev: sc device struct * @qp_count: desired qp count */ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) { struct irdma_virt_mem virt_mem; u32 i, mem_size; u32 qpwanted, mrwanted, pblewanted; u32 powerof2, hte; u32 sd_needed; u32 sd_diff; u32 loop_count = 0; struct irdma_hmc_info *hmc_info; struct irdma_hmc_fpm_misc *hmc_fpm_misc; int ret_code = 0; hmc_info = dev->hmc_info; hmc_fpm_misc = &dev->hmc_fpm_misc; ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id); if (ret_code) { ibdev_dbg(to_ibdev(dev), "HMC: irdma_sc_init_iw_hmc returned error_code = %d\n", ret_code); return ret_code; } for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt; sd_needed = irdma_est_sd(dev, hmc_info); ibdev_dbg(to_ibdev(dev), "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n", sd_needed, hmc_info->first_sd_index); ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n", hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds); qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt); powerof2 = 1; while (powerof2 <= qpwanted) powerof2 *= 2; powerof2 /= 2; qpwanted = powerof2; mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt; pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt; ibdev_dbg(to_ibdev(dev), "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n", qp_count, hmc_fpm_misc->max_sds, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt); hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt; hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt; hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt; hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1; while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt) qpwanted /= 2; do { ++loop_count; hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted; hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt = min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt); hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted; hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512); powerof2 = 1; while (powerof2 < hte) powerof2 *= 2; hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt = powerof2 * hmc_fpm_misc->ht_multiplier; if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) cfg_fpm_value_gen_1(dev, hmc_info, qpwanted); else cfg_fpm_value_gen_2(dev, hmc_info, qpwanted); hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted); hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size; hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size; hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt = (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket; hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted; sd_needed = irdma_est_sd(dev, hmc_info); ibdev_dbg(to_ibdev(dev), "HMC: sd_needed = %d, hmc_fpm_misc->max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n", sd_needed, hmc_fpm_misc->max_sds, mrwanted, pblewanted, qpwanted); /* Do not reduce resources further. 
All objects fit with max SDs */ if (sd_needed <= hmc_fpm_misc->max_sds) break; sd_diff = sd_needed - hmc_fpm_misc->max_sds; if (sd_diff > 128) { if (!(loop_count % 2) && qpwanted > 128) { qpwanted /= 2; } else { mrwanted /= 2; pblewanted /= 2; } continue; } if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF && pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) { pblewanted -= 256 * FPM_MULTIPLIER * sd_diff; continue; } else if (pblewanted > (100 * FPM_MULTIPLIER)) { pblewanted -= 10 * FPM_MULTIPLIER; } else if (pblewanted > FPM_MULTIPLIER) { pblewanted -= FPM_MULTIPLIER; } else if (qpwanted <= 128) { if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256) hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2; if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256) hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2; } if (mrwanted > FPM_MULTIPLIER) mrwanted -= FPM_MULTIPLIER; if (!(loop_count % 10) && qpwanted > 128) { qpwanted /= 2; if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256) hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2; } } while (loop_count < 2000); if (sd_needed > hmc_fpm_misc->max_sds) { ibdev_dbg(to_ibdev(dev), "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n", loop_count, sd_needed, hmc_info->sd_table.sd_cnt); return -EINVAL; } if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) { pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 * FPM_MULTIPLIER; hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted; sd_needed = irdma_est_sd(dev, hmc_info); } ibdev_dbg(to_ibdev(dev), "HMC: loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n", loop_count, sd_needed, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt, hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index); ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id); if (ret_code) { ibdev_dbg(to_ibdev(dev), "HMC: cfg_iw_fpm returned error_code[x%08X]\n", readl(dev->hw_regs[IRDMA_CQPERRCODES])); return ret_code; } mem_size = sizeof(struct irdma_hmc_sd_entry) * (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1); virt_mem.size = mem_size; virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); if (!virt_mem.va) { ibdev_dbg(to_ibdev(dev), "HMC: failed to allocate memory for sd_entry buffer\n"); return -ENOMEM; } hmc_info->sd_table.sd_entry = virt_mem.va; return ret_code; } /** * irdma_exec_cqp_cmd - execute cqp cmd when wqe are available * @dev: rdma device * @pcmdinfo: cqp command info */ static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev, struct cqp_cmds_info *pcmdinfo) { int status; struct irdma_dma_mem val_mem; bool alloc = false; dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++; switch (pcmdinfo->cqp_cmd) { case IRDMA_OP_CEQ_DESTROY: status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq, pcmdinfo->in.u.ceq_destroy.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_AEQ_DESTROY: status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq, pcmdinfo->in.u.aeq_destroy.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_CEQ_CREATE: status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq, pcmdinfo->in.u.ceq_create.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_AEQ_CREATE: status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq, pcmdinfo->in.u.aeq_create.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_QP_UPLOAD_CONTEXT: status = 
irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev, &pcmdinfo->in.u.qp_upload_context.info, pcmdinfo->in.u.qp_upload_context.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_CQ_CREATE: status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq, pcmdinfo->in.u.cq_create.scratch, pcmdinfo->in.u.cq_create.check_overflow, pcmdinfo->post_sq); break; case IRDMA_OP_CQ_MODIFY: status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq, &pcmdinfo->in.u.cq_modify.info, pcmdinfo->in.u.cq_modify.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_CQ_DESTROY: status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq, pcmdinfo->in.u.cq_destroy.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_QP_FLUSH_WQES: status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp, &pcmdinfo->in.u.qp_flush_wqes.info, pcmdinfo->in.u.qp_flush_wqes.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_GEN_AE: status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp, &pcmdinfo->in.u.gen_ae.info, pcmdinfo->in.u.gen_ae.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_MANAGE_PUSH_PAGE: status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp, &pcmdinfo->in.u.manage_push_page.info, pcmdinfo->in.u.manage_push_page.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_UPDATE_PE_SDS: status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev, &pcmdinfo->in.u.update_pe_sds.info, pcmdinfo->in.u.update_pe_sds.scratch); break; case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE: /* switch to calling through the call table */ status = irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp, &pcmdinfo->in.u.manage_hmc_pm.info, pcmdinfo->in.u.manage_hmc_pm.scratch, true); break; case IRDMA_OP_SUSPEND: status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp, pcmdinfo->in.u.suspend_resume.qp, pcmdinfo->in.u.suspend_resume.scratch); break; case IRDMA_OP_RESUME: status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp, pcmdinfo->in.u.suspend_resume.qp, pcmdinfo->in.u.suspend_resume.scratch); break; case IRDMA_OP_QUERY_FPM_VAL: val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa; val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va; status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp, pcmdinfo->in.u.query_fpm_val.scratch, pcmdinfo->in.u.query_fpm_val.hmc_fn_id, &val_mem, true, IRDMA_CQP_WAIT_EVENT); break; case IRDMA_OP_COMMIT_FPM_VAL: val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa; val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va; status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp, pcmdinfo->in.u.commit_fpm_val.scratch, pcmdinfo->in.u.commit_fpm_val.hmc_fn_id, &val_mem, true, IRDMA_CQP_WAIT_EVENT); break; case IRDMA_OP_STATS_ALLOCATE: alloc = true; fallthrough; case IRDMA_OP_STATS_FREE: status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp, &pcmdinfo->in.u.stats_manage.info, alloc, pcmdinfo->in.u.stats_manage.scratch); break; case IRDMA_OP_STATS_GATHER: status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp, &pcmdinfo->in.u.stats_gather.info, pcmdinfo->in.u.stats_gather.scratch); break; case IRDMA_OP_WS_MODIFY_NODE: status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, &pcmdinfo->in.u.ws_node.info, IRDMA_MODIFY_NODE, pcmdinfo->in.u.ws_node.scratch); break; case IRDMA_OP_WS_DELETE_NODE: status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, &pcmdinfo->in.u.ws_node.info, IRDMA_DEL_NODE, pcmdinfo->in.u.ws_node.scratch); break; case IRDMA_OP_WS_ADD_NODE: status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, 
&pcmdinfo->in.u.ws_node.info, IRDMA_ADD_NODE, pcmdinfo->in.u.ws_node.scratch); break; case IRDMA_OP_SET_UP_MAP: status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp, &pcmdinfo->in.u.up_map.info, pcmdinfo->in.u.up_map.scratch); break; case IRDMA_OP_QUERY_RDMA_FEATURES: status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp, &pcmdinfo->in.u.query_rdma.query_buff_mem, pcmdinfo->in.u.query_rdma.scratch); break; case IRDMA_OP_DELETE_ARP_CACHE_ENTRY: status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp, pcmdinfo->in.u.del_arp_cache_entry.scratch, pcmdinfo->in.u.del_arp_cache_entry.arp_index, pcmdinfo->post_sq); break; case IRDMA_OP_MANAGE_APBVT_ENTRY: status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp, &pcmdinfo->in.u.manage_apbvt_entry.info, pcmdinfo->in.u.manage_apbvt_entry.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY: status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp, &pcmdinfo->in.u.manage_qhash_table_entry.info, pcmdinfo->in.u.manage_qhash_table_entry.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_QP_MODIFY: status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp, &pcmdinfo->in.u.qp_modify.info, pcmdinfo->in.u.qp_modify.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_QP_CREATE: status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp, &pcmdinfo->in.u.qp_create.info, pcmdinfo->in.u.qp_create.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_QP_DESTROY: status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp, pcmdinfo->in.u.qp_destroy.scratch, pcmdinfo->in.u.qp_destroy.remove_hash_idx, pcmdinfo->in.u.qp_destroy.ignore_mw_bnd, pcmdinfo->post_sq); break; case IRDMA_OP_ALLOC_STAG: status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev, &pcmdinfo->in.u.alloc_stag.info, pcmdinfo->in.u.alloc_stag.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_MR_REG_NON_SHARED: status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev, &pcmdinfo->in.u.mr_reg_non_shared.info, pcmdinfo->in.u.mr_reg_non_shared.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_DEALLOC_STAG: status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev, &pcmdinfo->in.u.dealloc_stag.info, pcmdinfo->in.u.dealloc_stag.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_MW_ALLOC: status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev, &pcmdinfo->in.u.mw_alloc.info, pcmdinfo->in.u.mw_alloc.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_ADD_ARP_CACHE_ENTRY: status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp, &pcmdinfo->in.u.add_arp_cache_entry.info, pcmdinfo->in.u.add_arp_cache_entry.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY: status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp, pcmdinfo->in.u.alloc_local_mac_entry.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_ADD_LOCAL_MAC_ENTRY: status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp, &pcmdinfo->in.u.add_local_mac_entry.info, pcmdinfo->in.u.add_local_mac_entry.scratch, pcmdinfo->post_sq); break; case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY: status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp, pcmdinfo->in.u.del_local_mac_entry.scratch, pcmdinfo->in.u.del_local_mac_entry.entry_idx, pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count, pcmdinfo->post_sq); break; case IRDMA_OP_AH_CREATE: status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp, &pcmdinfo->in.u.ah_create.info, pcmdinfo->in.u.ah_create.scratch); 
break; case IRDMA_OP_AH_DESTROY: status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp, &pcmdinfo->in.u.ah_destroy.info, pcmdinfo->in.u.ah_destroy.scratch); break; case IRDMA_OP_MC_CREATE: status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp, &pcmdinfo->in.u.mc_create.info, pcmdinfo->in.u.mc_create.scratch); break; case IRDMA_OP_MC_DESTROY: status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp, &pcmdinfo->in.u.mc_destroy.info, pcmdinfo->in.u.mc_destroy.scratch); break; case IRDMA_OP_MC_MODIFY: status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp, &pcmdinfo->in.u.mc_modify.info, pcmdinfo->in.u.mc_modify.scratch); break; default: status = -EOPNOTSUPP; break; } return status; } /** * irdma_process_cqp_cmd - process all cqp commands * @dev: sc device struct * @pcmdinfo: cqp command info */ int irdma_process_cqp_cmd(struct irdma_sc_dev *dev, struct cqp_cmds_info *pcmdinfo) { int status = 0; unsigned long flags; spin_lock_irqsave(&dev->cqp_lock, flags); if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp)) status = irdma_exec_cqp_cmd(dev, pcmdinfo); else list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head); spin_unlock_irqrestore(&dev->cqp_lock, flags); return status; } /** * irdma_process_bh - called from tasklet for cqp list * @dev: sc device struct */ int irdma_process_bh(struct irdma_sc_dev *dev) { int status = 0; struct cqp_cmds_info *pcmdinfo; unsigned long flags; spin_lock_irqsave(&dev->cqp_lock, flags); while (!list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp)) { pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev); status = irdma_exec_cqp_cmd(dev, pcmdinfo); if (status) break; } spin_unlock_irqrestore(&dev->cqp_lock, flags); return status; } /** * irdma_cfg_aeq- Configure AEQ interrupt * @dev: pointer to the device structure * @idx: vector index * @enable: True to enable, False disables */ void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable) { u32 reg_val; reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) | FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) | FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, 3); writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]); } /** * sc_vsi_update_stats - Update statistics * @vsi: sc_vsi instance to update */ void sc_vsi_update_stats(struct irdma_sc_vsi *vsi) { struct irdma_gather_stats *gather_stats; struct irdma_gather_stats *last_gather_stats; gather_stats = vsi->pestat->gather_info.gather_stats_va; last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va; irdma_update_stats(&vsi->pestat->hw_stats, gather_stats, last_gather_stats, vsi->dev->hw_stats_map, vsi->dev->hw_attrs.max_stat_idx); } /** * irdma_wait_pe_ready - Check if firmware is ready * @dev: provides access to registers */ static int irdma_wait_pe_ready(struct irdma_sc_dev *dev) { u32 statuscpu0; u32 statuscpu1; u32 statuscpu2; u32 retrycount = 0; do { statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]); statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]); statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]); if (statuscpu0 == 0x80 && statuscpu1 == 0x80 && statuscpu2 == 0x80) return 0; mdelay(1000); } while (retrycount++ < dev->hw_attrs.max_pe_ready_count); return -1; } static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev) { switch (dev->hw_attrs.uk_attrs.hw_rev) { case IRDMA_GEN_1: i40iw_init_hw(dev); break; case IRDMA_GEN_2: icrdma_init_hw(dev); break; } } /** * irdma_sc_dev_init - Initialize control part of device * @ver: version * @dev: Device pointer * @info: 
Device init info */ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev, struct irdma_device_init_info *info) { u32 val; int ret_code = 0; u8 db_size; INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */ mutex_init(&dev->ws_mutex); dev->hmc_fn_id = info->hmc_fn_id; dev->fpm_query_buf_pa = info->fpm_query_buf_pa; dev->fpm_query_buf = info->fpm_query_buf; dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa; dev->fpm_commit_buf = info->fpm_commit_buf; dev->hw = info->hw; dev->hw->hw_addr = info->bar0; /* Setup the hardware limits, hmc may limit further */ dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID; dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES; dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES; dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES; dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES; dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE; dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE; dev->hw_attrs.uk_attrs.max_hw_wq_frags = IRDMA_MAX_WQ_FRAGMENT_COUNT; dev->hw_attrs.uk_attrs.max_hw_read_sges = IRDMA_MAX_SGE_RD; dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE; dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE; dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE; dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT; dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE; dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES; dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR); dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA; dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA; dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS; dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT; dev->hw_attrs.max_pe_ready_count = 14; dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT; dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT; dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS; dev->hw_attrs.uk_attrs.hw_rev = ver; irdma_sc_init_hw(dev); if (irdma_wait_pe_ready(dev)) return -ETIMEDOUT; val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]); db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val); if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) { ibdev_dbg(to_ibdev(dev), "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n", val, db_size); return -ENODEV; } dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET]; return ret_code; } /** * irdma_stat_val - Extract HW counter value from statistics buffer * @stats_val: pointer to statistics buffer * @byteoff: byte offset of counter value in the buffer (8B-aligned) * @bitoff: bit offset of counter value within 8B entry * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter) */ static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff, u8 bitoff, u64 bitmask) { u16 idx = byteoff / sizeof(*stats_val); return (stats_val[idx] >> bitoff) & bitmask; } /** * irdma_stat_delta - Calculate counter delta * @new_val: updated counter value * @old_val: last counter value * @max_val: maximum counter value (e.g. 
0xffffff for 24-bit counter) */ static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val) { if (new_val >= old_val) return new_val - old_val; /* roll-over case */ return max_val - old_val + new_val + 1; } /** * irdma_update_stats - Update statistics * @hw_stats: hw_stats instance to update * @gather_stats: updated stat counters * @last_gather_stats: last stat counters * @map: HW stat map (hw_stats => gather_stats) * @max_stat_idx: number of HW stats */ void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats, struct irdma_gather_stats *gather_stats, struct irdma_gather_stats *last_gather_stats, const struct irdma_hw_stat_map *map, u16 max_stat_idx) { u64 *stats_val = hw_stats->stats_val; u16 i; for (i = 0; i < max_stat_idx; i++) { u64 new_val = irdma_stat_val(gather_stats->val, map[i].byteoff, map[i].bitoff, map[i].bitmask); u64 last_val = irdma_stat_val(last_gather_stats->val, map[i].byteoff, map[i].bitoff, map[i].bitmask); stats_val[i] += irdma_stat_delta(new_val, last_val, map[i].bitmask); } memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats)); }
linux-master
drivers/infiniband/hw/irdma/ctrl.c
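The statistics helpers at the end of ctrl.c (irdma_stat_val(), irdma_stat_delta(), irdma_update_stats()) extract bitfield counters from the gathered stats buffer and accumulate deltas that tolerate counter wraparound. Below is a minimal, self-contained userspace sketch of that arithmetic; the 24-bit width, the byte/bit offsets, and the sample values are illustrative assumptions, not taken from the driver's hardware stat map.

/* Standalone illustration (not kernel code) of the extract-and-delta logic
 * used by irdma_stat_val()/irdma_stat_delta(). Field width and offsets here
 * are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

/* Pull a counter bitfield out of an 8-byte-aligned stats word. */
static uint64_t stat_val(const uint64_t *stats, uint16_t byteoff, uint8_t bitoff,
			 uint64_t bitmask)
{
	return (stats[byteoff / sizeof(*stats)] >> bitoff) & bitmask;
}

/* Difference between two samples, allowing one roll-over at max_val. */
static uint64_t stat_delta(uint64_t new_val, uint64_t old_val, uint64_t max_val)
{
	if (new_val >= old_val)
		return new_val - old_val;
	return max_val - old_val + new_val + 1;
}

int main(void)
{
	const uint64_t mask24 = 0xffffff;	/* 24-bit counter */
	uint64_t buf[1];
	uint64_t old_val, new_val;

	buf[0] = 0xfffffeULL << 8;		/* first sample, near the top */
	old_val = stat_val(buf, 0, 8, mask24);

	buf[0] = 0x000005ULL << 8;		/* second sample, counter wrapped */
	new_val = stat_val(buf, 0, 8, mask24);

	/* Prints 7: one tick to 0xffffff, one to 0, five more to 5. */
	printf("delta = %llu\n",
	       (unsigned long long)stat_delta(new_val, old_val, mask24));
	return 0;
}

The kernel version applies the same arithmetic per stat index, driven by the byteoff/bitoff/bitmask triples in the device's hw_stats_map.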
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "defs.h" #include "user.h" #include "irdma.h" /** * irdma_set_fragment - set fragment in wqe * @wqe: wqe for setting fragment * @offset: offset value * @sge: sge length and stag * @valid: The wqe valid */ static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge, u8 valid) { if (sge) { set_64bit_val(wqe, offset, FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr)); set_64bit_val(wqe, offset + 8, FIELD_PREP(IRDMAQPSQ_VALID, valid) | FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) | FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey)); } else { set_64bit_val(wqe, offset, 0); set_64bit_val(wqe, offset + 8, FIELD_PREP(IRDMAQPSQ_VALID, valid)); } } /** * irdma_set_fragment_gen_1 - set fragment in wqe * @wqe: wqe for setting fragment * @offset: offset value * @sge: sge length and stag * @valid: wqe valid flag */ static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset, struct ib_sge *sge, u8 valid) { if (sge) { set_64bit_val(wqe, offset, FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr)); set_64bit_val(wqe, offset + 8, FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) | FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey)); } else { set_64bit_val(wqe, offset, 0); set_64bit_val(wqe, offset + 8, 0); } } /** * irdma_nop_1 - insert a NOP wqe * @qp: hw qp ptr */ static int irdma_nop_1(struct irdma_qp_uk *qp) { u64 hdr; __le64 *wqe; u32 wqe_idx; bool signaled = false; if (!qp->sq_ring.head) return -EINVAL; wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); wqe = qp->sq_base[wqe_idx].elem; qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA; set_64bit_val(wqe, 0, 0); set_64bit_val(wqe, 8, 0); set_64bit_val(wqe, 16, 0); hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); /* make sure WQE is written before valid bit is set */ dma_wmb(); set_64bit_val(wqe, 24, hdr); return 0; } /** * irdma_clr_wqes - clear next 128 sq entries * @qp: hw qp ptr * @qp_wqe_idx: wqe_idx */ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx) { struct irdma_qp_quanta *sq; u32 wqe_idx; if (!(qp_wqe_idx & 0x7F)) { wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size; sq = qp->sq_base + wqe_idx; if (wqe_idx) memset(sq, qp->swqe_polarity ? 0 : 0xFF, 128 * sizeof(*sq)); else memset(sq, qp->swqe_polarity ? 
0xFF : 0, 128 * sizeof(*sq)); } } /** * irdma_uk_qp_post_wr - ring doorbell * @qp: hw qp ptr */ void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp) { u64 temp; u32 hw_sq_tail; u32 sw_sq_head; /* valid bit is written and loads completed before reading shadow */ mb(); /* read the doorbell shadow area */ get_64bit_val(qp->shadow_area, 0, &temp); hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp); sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); if (sw_sq_head != qp->initial_ring.head) { if (sw_sq_head != hw_sq_tail) { if (sw_sq_head > qp->initial_ring.head) { if (hw_sq_tail >= qp->initial_ring.head && hw_sq_tail < sw_sq_head) writel(qp->qp_id, qp->wqe_alloc_db); } else { if (hw_sq_tail >= qp->initial_ring.head || hw_sq_tail < sw_sq_head) writel(qp->qp_id, qp->wqe_alloc_db); } } } qp->initial_ring.head = qp->sq_ring.head; } /** * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go * @qp: hw qp ptr * @wqe_idx: return wqe index * @quanta: size of WR in quanta * @total_size: size of WR in bytes * @info: info on WR */ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, u16 quanta, u32 total_size, struct irdma_post_sq_info *info) { __le64 *wqe; __le64 *wqe_0 = NULL; u16 avail_quanta; u16 i; avail_quanta = qp->uk_attrs->max_hw_sq_chunk - (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) % qp->uk_attrs->max_hw_sq_chunk); if (quanta <= avail_quanta) { /* WR fits in current chunk */ if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring)) return NULL; } else { /* Need to pad with NOP */ if (quanta + avail_quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring)) return NULL; for (i = 0; i < avail_quanta; i++) { irdma_nop_1(qp); IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring); } } *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); if (!*wqe_idx) qp->swqe_polarity = !qp->swqe_polarity; IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta); wqe = qp->sq_base[*wqe_idx].elem; if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 && (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) { wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem; wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 
0 : 1)); } qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id; qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size; qp->sq_wrtrk_array[*wqe_idx].quanta = quanta; return wqe; } /** * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe * @qp: hw qp ptr * @wqe_idx: return wqe index */ __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx) { __le64 *wqe; int ret_code; if (IRDMA_RING_FULL_ERR(qp->rq_ring)) return NULL; IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code); if (ret_code) return NULL; if (!*wqe_idx) qp->rwqe_polarity = !qp->rwqe_polarity; /* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */ wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem; return wqe; } /** * irdma_uk_rdma_write - rdma write operation * @qp: hw qp ptr * @info: post sq information * @post_sq: flag to post sq */ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) { u64 hdr; __le64 *wqe; struct irdma_rdma_write *op_info; u32 i, wqe_idx; u32 total_size = 0, byte_off; int ret_code; u32 frag_cnt, addl_frag_cnt; bool read_fence = false; u16 quanta; op_info = &info->op.rdma_write; if (op_info->num_lo_sges > qp->max_sq_frag_cnt) return -EINVAL; for (i = 0; i < op_info->num_lo_sges; i++) total_size += op_info->lo_sg_list[i].length; read_fence |= info->read_fence; if (info->imm_data_valid) frag_cnt = op_info->num_lo_sges + 1; else frag_cnt = op_info->num_lo_sges; addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0; ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta); if (ret_code) return ret_code; wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); set_64bit_val(wqe, 16, FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr)); if (info->imm_data_valid) { set_64bit_val(wqe, 0, FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); i = 0; } else { qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list, qp->swqe_polarity); i = 1; } for (byte_off = 32; i < op_info->num_lo_sges; i++) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i], qp->swqe_polarity); byte_off += 16; } /* if not an odd number set valid bit in next fragment */ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) && frag_cnt) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, qp->swqe_polarity); if (qp->uk_attrs->hw_rev == IRDMA_GEN_2) ++addl_frag_cnt; } hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) | FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) | FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_uk_rdma_read - rdma read command * @qp: hw qp ptr * @info: post sq information * @inv_stag: flag for inv_stag * @post_sq: flag to post sq */ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool inv_stag, bool post_sq) { struct irdma_rdma_read *op_info; int ret_code; u32 i, byte_off, total_size = 0; bool local_fence = false; u32 addl_frag_cnt; __le64 *wqe; u32 wqe_idx; u16 quanta; u64 hdr; op_info = &info->op.rdma_read; if (qp->max_sq_frag_cnt < op_info->num_lo_sges) return -EINVAL; 
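	/* Total up the local SGE bytes: the byte count is recorded as wr_len
	 * in the SQ work-request tracking array by irdma_qp_get_next_send_wqe(),
	 * while the SGE count itself is what irdma_fragcnt_to_quanta_sq()
	 * turns into 32-byte quanta to size the WQE.
	 */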
for (i = 0; i < op_info->num_lo_sges; i++) total_size += op_info->lo_sg_list[i].length; ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta); if (ret_code) return ret_code; wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); addl_frag_cnt = op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0; local_fence |= info->local_fence; qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list, qp->swqe_polarity); for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i], qp->swqe_polarity); byte_off += 16; } /* if not an odd number set valid bit in next fragment */ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, qp->swqe_polarity); if (qp->uk_attrs->hw_rev == IRDMA_GEN_2) ++addl_frag_cnt; } set_64bit_val(wqe, 16, FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr)); hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) | FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | FIELD_PREP(IRDMAQPSQ_OPCODE, (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) | FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_uk_send - rdma send command * @qp: hw qp ptr * @info: post sq information * @post_sq: flag to post sq */ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; struct irdma_post_send *op_info; u64 hdr; u32 i, wqe_idx, total_size = 0, byte_off; int ret_code; u32 frag_cnt, addl_frag_cnt; bool read_fence = false; u16 quanta; op_info = &info->op.send; if (qp->max_sq_frag_cnt < op_info->num_sges) return -EINVAL; for (i = 0; i < op_info->num_sges; i++) total_size += op_info->sg_list[i].length; if (info->imm_data_valid) frag_cnt = op_info->num_sges + 1; else frag_cnt = op_info->num_sges; ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta); if (ret_code) return ret_code; wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); read_fence |= info->read_fence; addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0; if (info->imm_data_valid) { set_64bit_val(wqe, 0, FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); i = 0; } else { qp->wqe_ops.iw_set_fragment(wqe, 0, frag_cnt ? op_info->sg_list : NULL, qp->swqe_polarity); i = 1; } for (byte_off = 32; i < op_info->num_sges; i++) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i], qp->swqe_polarity); byte_off += 16; } /* if not an odd number set valid bit in next fragment */ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) && frag_cnt) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, qp->swqe_polarity); if (qp->uk_attrs->hw_rev == IRDMA_GEN_2) ++addl_frag_cnt; } set_64bit_val(wqe, 16, FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) | FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp)); hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) | FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) | FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, (info->imm_data_valid ? 
1 : 0)) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) | FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) | FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe * @wqe: wqe for setting fragment * @op_info: info for setting bind wqe values */ static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe, struct irdma_bind_window *op_info) { set_64bit_val(wqe, 0, (uintptr_t)op_info->va); set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) | FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag)); set_64bit_val(wqe, 16, op_info->bind_len); } /** * irdma_copy_inline_data_gen_1 - Copy inline data to wqe * @wqe: pointer to wqe * @sge_list: table of pointers to inline data * @num_sges: Total inline data length * @polarity: compatibility parameter */ static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list, u32 num_sges, u8 polarity) { u32 quanta_bytes_remaining = 16; int i; for (i = 0; i < num_sges; i++) { u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr; u32 sge_len = sge_list[i].length; while (sge_len) { u32 bytes_copied; bytes_copied = min(sge_len, quanta_bytes_remaining); memcpy(wqe, cur_sge, bytes_copied); wqe += bytes_copied; cur_sge += bytes_copied; quanta_bytes_remaining -= bytes_copied; sge_len -= bytes_copied; if (!quanta_bytes_remaining) { /* Remaining inline bytes reside after hdr */ wqe += 16; quanta_bytes_remaining = 32; } } } } /** * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta * @data_size: data size for inline * * Gets the quanta based on inline and immediate data. */ static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size) { return data_size <= 16 ? 
IRDMA_QP_WQE_MIN_QUANTA : 2; } /** * irdma_set_mw_bind_wqe - set mw bind in wqe * @wqe: wqe for setting mw bind * @op_info: info for setting wqe values */ static void irdma_set_mw_bind_wqe(__le64 *wqe, struct irdma_bind_window *op_info) { set_64bit_val(wqe, 0, (uintptr_t)op_info->va); set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) | FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag)); set_64bit_val(wqe, 16, op_info->bind_len); } /** * irdma_copy_inline_data - Copy inline data to wqe * @wqe: pointer to wqe * @sge_list: table of pointers to inline data * @num_sges: number of SGE's * @polarity: polarity of wqe valid bit */ static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list, u32 num_sges, u8 polarity) { u8 inline_valid = polarity << IRDMA_INLINE_VALID_S; u32 quanta_bytes_remaining = 8; bool first_quanta = true; int i; wqe += 8; for (i = 0; i < num_sges; i++) { u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr; u32 sge_len = sge_list[i].length; while (sge_len) { u32 bytes_copied; bytes_copied = min(sge_len, quanta_bytes_remaining); memcpy(wqe, cur_sge, bytes_copied); wqe += bytes_copied; cur_sge += bytes_copied; quanta_bytes_remaining -= bytes_copied; sge_len -= bytes_copied; if (!quanta_bytes_remaining) { quanta_bytes_remaining = 31; /* Remaining inline bytes reside after hdr */ if (first_quanta) { first_quanta = false; wqe += 16; } else { *wqe = inline_valid; wqe++; } } } } if (!first_quanta && quanta_bytes_remaining < 31) *(wqe + quanta_bytes_remaining) = inline_valid; } /** * irdma_inline_data_size_to_quanta - based on inline data, quanta * @data_size: data size for inline * * Gets the quanta based on inline and immediate data. */ static u16 irdma_inline_data_size_to_quanta(u32 data_size) { if (data_size <= 8) return IRDMA_QP_WQE_MIN_QUANTA; else if (data_size <= 39) return 2; else if (data_size <= 70) return 3; else if (data_size <= 101) return 4; else if (data_size <= 132) return 5; else if (data_size <= 163) return 6; else if (data_size <= 194) return 7; else return 8; } /** * irdma_uk_inline_rdma_write - inline rdma write operation * @qp: hw qp ptr * @info: post sq information * @post_sq: flag to post sq */ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; struct irdma_rdma_write *op_info; u64 hdr = 0; u32 wqe_idx; bool read_fence = false; u32 i, total_size = 0; u16 quanta; op_info = &info->op.rdma_write; if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges)) return -EINVAL; for (i = 0; i < op_info->num_lo_sges; i++) total_size += op_info->lo_sg_list[i].length; if (unlikely(total_size > qp->max_inline_data)) return -EINVAL; quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size); wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); read_fence |= info->read_fence; set_64bit_val(wqe, 16, FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr)); hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) | FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) | FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) | FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 
1 : 0) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); if (info->imm_data_valid) set_64bit_val(wqe, 0, FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list, op_info->num_lo_sges, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_uk_inline_send - inline send operation * @qp: hw qp ptr * @info: post sq information * @post_sq: flag to post sq */ int irdma_uk_inline_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; struct irdma_post_send *op_info; u64 hdr; u32 wqe_idx; bool read_fence = false; u32 i, total_size = 0; u16 quanta; op_info = &info->op.send; if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges)) return -EINVAL; for (i = 0; i < op_info->num_sges; i++) total_size += op_info->sg_list[i].length; if (unlikely(total_size > qp->max_inline_data)) return -EINVAL; quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size); wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); set_64bit_val(wqe, 16, FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) | FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp)); read_fence |= info->read_fence; hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) | FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) | FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) | FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, (info->imm_data_valid ? 1 : 0)) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 
1 : 0)) | FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) | FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); if (info->imm_data_valid) set_64bit_val(wqe, 0, FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list, op_info->num_sges, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_uk_stag_local_invalidate - stag invalidate operation * @qp: hw qp ptr * @info: post sq information * @post_sq: flag to post sq */ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; struct irdma_inv_local_stag *op_info; u64 hdr; u32 wqe_idx; bool local_fence = false; struct ib_sge sge = {}; op_info = &info->op.inv_local_stag; local_fence = info->local_fence; wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA, 0, info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); sge.lkey = op_info->target_stag; qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0); set_64bit_val(wqe, 16, 0); hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) | FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_uk_post_receive - post receive wqe * @qp: hw qp ptr * @info: post rq information */ int irdma_uk_post_receive(struct irdma_qp_uk *qp, struct irdma_post_rq_info *info) { u32 wqe_idx, i, byte_off; u32 addl_frag_cnt; __le64 *wqe; u64 hdr; if (qp->max_rq_frag_cnt < info->num_sges) return -EINVAL; wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx); if (!wqe) return -ENOMEM; qp->rq_wrid_array[wqe_idx] = info->wr_id; addl_frag_cnt = info->num_sges > 1 ? 
(info->num_sges - 1) : 0; qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list, qp->rwqe_polarity); for (i = 1, byte_off = 32; i < info->num_sges; i++) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i], qp->rwqe_polarity); byte_off += 16; } /* if not an odd number set valid bit in next fragment */ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) && info->num_sges) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, qp->rwqe_polarity); if (qp->uk_attrs->hw_rev == IRDMA_GEN_2) ++addl_frag_cnt; } set_64bit_val(wqe, 16, 0); hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); return 0; } /** * irdma_uk_cq_resize - reset the cq buffer info * @cq: cq to resize * @cq_base: new cq buffer addr * @cq_size: number of cqes */ void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size) { cq->cq_base = cq_base; cq->cq_size = cq_size; IRDMA_RING_INIT(cq->cq_ring, cq->cq_size); cq->polarity = 1; } /** * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers * @cq: cq to resize * @cq_cnt: the count of the resized cq buffers */ void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt) { u64 temp_val; u16 sw_cq_sel; u8 arm_next_se; u8 arm_next; u8 arm_seq_num; get_64bit_val(cq->shadow_area, 32, &temp_val); sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val); sw_cq_sel += cq_cnt; arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val); arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val); arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val); temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next); set_64bit_val(cq->shadow_area, 32, temp_val); } /** * irdma_uk_cq_request_notification - cq notification request (door bell) * @cq: hw cq * @cq_notify: notification type */ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq, enum irdma_cmpl_notify cq_notify) { u64 temp_val; u16 sw_cq_sel; u8 arm_next_se = 0; u8 arm_next = 0; u8 arm_seq_num; get_64bit_val(cq->shadow_area, 32, &temp_val); arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val); arm_seq_num++; sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val); arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val); arm_next_se |= 1; if (cq_notify == IRDMA_CQ_COMPL_EVENT) arm_next = 1; temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next); set_64bit_val(cq->shadow_area, 32, temp_val); dma_wmb(); /* make sure WQE is populated before valid bit is set */ writel(cq->cq_id, cq->cqe_alloc_db); } /** * irdma_uk_cq_poll_cmpl - get cq completion info * @cq: hw cq * @info: cq poll information returned */ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) { u64 comp_ctx, qword0, qword2, qword3; __le64 *cqe; struct irdma_qp_uk *qp; struct irdma_ring *pring = NULL; u32 wqe_idx; int ret_code; bool move_cq_head = true; u8 polarity; bool ext_valid; __le64 *ext_cqe; if (cq->avoid_mem_cflct) cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq); else cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq); get_64bit_val(cqe, 24, &qword3); polarity = 
(u8)FIELD_GET(IRDMA_CQ_VALID, qword3); if (polarity != cq->polarity) return -ENOENT; /* Ensure CQE contents are read after valid bit is checked */ dma_rmb(); ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3); if (ext_valid) { u64 qword6, qword7; u32 peek_head; if (cq->avoid_mem_cflct) { ext_cqe = (__le64 *)((u8 *)cqe + 32); get_64bit_val(ext_cqe, 24, &qword7); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7); } else { peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size; ext_cqe = cq->cq_base[peek_head].buf; get_64bit_val(ext_cqe, 24, &qword7); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7); if (!peek_head) polarity ^= 1; } if (polarity != cq->polarity) return -ENOENT; /* Ensure ext CQE contents are read after ext valid bit is checked */ dma_rmb(); info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7); if (info->imm_valid) { u64 qword4; get_64bit_val(ext_cqe, 0, &qword4); info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4); } info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7); info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7); if (info->ud_smac_valid || info->ud_vlan_valid) { get_64bit_val(ext_cqe, 16, &qword6); if (info->ud_vlan_valid) info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6); if (info->ud_smac_valid) { info->ud_smac[5] = qword6 & 0xFF; info->ud_smac[4] = (qword6 >> 8) & 0xFF; info->ud_smac[3] = (qword6 >> 16) & 0xFF; info->ud_smac[2] = (qword6 >> 24) & 0xFF; info->ud_smac[1] = (qword6 >> 32) & 0xFF; info->ud_smac[0] = (qword6 >> 40) & 0xFF; } } } else { info->imm_valid = false; info->ud_smac_valid = false; info->ud_vlan_valid = false; } info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3); info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3); info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3); if (info->error) { info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3); info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3); if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) { info->comp_status = IRDMA_COMPL_STATUS_FLUSHED; /* Set the min error to standard flush error code for remaining cqes */ if (info->minor_err != FLUSH_GENERAL_ERR) { qword3 &= ~IRDMA_CQ_MINERR; qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR); set_64bit_val(cqe, 24, qword3); } } else { info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN; } } else { info->comp_status = IRDMA_COMPL_STATUS_SUCCESS; } get_64bit_val(cqe, 0, &qword0); get_64bit_val(cqe, 16, &qword2); info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0); info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2); info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2); get_64bit_val(cqe, 8, &comp_ctx); info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3); qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx; if (!qp || qp->destroy_pending) { ret_code = -EFAULT; goto exit; } wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3); info->qp_handle = (irdma_qp_handle)(unsigned long)qp; info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3); if (info->q_type == IRDMA_CQE_QTYPE_RQ) { u32 array_idx; array_idx = wqe_idx / qp->rq_wqe_size_multiplier; if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED || info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) { if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) { ret_code = -ENOENT; goto exit; } info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail]; array_idx = qp->rq_ring.tail; } else { info->wr_id = qp->rq_wrid_array[array_idx]; } info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0); if (qword3 & IRDMACQ_STAG) { info->stag_invalid_set = true; 
info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2); } else { info->stag_invalid_set = false; } IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1); if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) { qp->rq_flush_seen = true; if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) qp->rq_flush_complete = true; else move_cq_head = false; } pring = &qp->rq_ring; } else { /* q_type is IRDMA_CQE_QTYPE_SQ */ if (qp->first_sq_wq) { if (wqe_idx + 1 >= qp->conn_wqes) qp->first_sq_wq = false; if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) { IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); IRDMA_RING_MOVE_TAIL(cq->cq_ring); set_64bit_val(cq->shadow_area, 0, IRDMA_RING_CURRENT_HEAD(cq->cq_ring)); memset(info, 0, sizeof(struct irdma_cq_poll_info)); return irdma_uk_cq_poll_cmpl(cq, info); } } if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) { info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; if (!info->comp_status) info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len; info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3); IRDMA_RING_SET_TAIL(qp->sq_ring, wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta); } else { if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) { ret_code = -ENOENT; goto exit; } do { __le64 *sw_wqe; u64 wqe_qword; u32 tail; tail = qp->sq_ring.tail; sw_wqe = qp->sq_base[tail].elem; get_64bit_val(sw_wqe, 24, &wqe_qword); info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword); IRDMA_RING_SET_TAIL(qp->sq_ring, tail + qp->sq_wrtrk_array[tail].quanta); if (info->op_type != IRDMAQP_OP_NOP) { info->wr_id = qp->sq_wrtrk_array[tail].wrid; info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len; break; } } while (1); if (info->op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR) info->minor_err = FLUSH_MW_BIND_ERR; qp->sq_flush_seen = true; if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) qp->sq_flush_complete = true; } pring = &qp->sq_ring; } ret_code = 0; exit: if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) if (pring && IRDMA_RING_MORE_WORK(*pring)) move_cq_head = false; if (move_cq_head) { IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring)) cq->polarity ^= 1; if (ext_valid && !cq->avoid_mem_cflct) { IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring)) cq->polarity ^= 1; } IRDMA_RING_MOVE_TAIL(cq->cq_ring); if (!cq->avoid_mem_cflct && ext_valid) IRDMA_RING_MOVE_TAIL(cq->cq_ring); set_64bit_val(cq->shadow_area, 0, IRDMA_RING_CURRENT_HEAD(cq->cq_ring)); } else { qword3 &= ~IRDMA_CQ_WQEIDX; qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail); set_64bit_val(cqe, 24, qword3); } return ret_code; } /** * irdma_qp_round_up - return round up qp wq depth * @wqdepth: wq depth in quanta to round up */ static int irdma_qp_round_up(u32 wqdepth) { int scount = 1; for (wqdepth--; scount <= 16; scount *= 2) wqdepth |= wqdepth >> scount; return ++wqdepth; } /** * irdma_get_wqe_shift - get shift count for maximum wqe size * @uk_attrs: qp HW attributes * @sge: Maximum Scatter Gather Elements wqe * @inline_data: Maximum inline data size * @shift: Returns the shift needed based on sge * * Shift can be used to left shift the wqe size based on number of SGEs and inlind data size. * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32 * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe * size of 64 bytes). * For 4-7 SGE's and inline <= 101 Shift of 2 otherwise (wqe * size of 256 bytes). 
*/ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge, u32 inline_data, u8 *shift) { *shift = 0; if (uk_attrs->hw_rev >= IRDMA_GEN_2) { if (sge > 1 || inline_data > 8) { if (sge < 4 && inline_data <= 39) *shift = 1; else if (sge < 8 && inline_data <= 101) *shift = 2; else *shift = 3; } } else if (sge > 1 || inline_data > 16) { *shift = (sge < 4 && inline_data <= 48) ? 1 : 2; } } /* * irdma_get_sqdepth - get SQ depth (quanta) * @uk_attrs: qp HW attributes * @sq_size: SQ size * @shift: shift which determines size of WQE * @sqdepth: depth of SQ * */ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth) { u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift; *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD); if (*sqdepth < min_size) *sqdepth = min_size; else if (*sqdepth > uk_attrs->max_hw_wq_quanta) return -EINVAL; return 0; } /* * irdma_get_rqdepth - get RQ depth (quanta) * @uk_attrs: qp HW attributes * @rq_size: RQ size * @shift: shift which determines size of WQE * @rqdepth: depth of RQ */ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth) { u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift; *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD); if (*rqdepth < min_size) *rqdepth = min_size; else if (*rqdepth > uk_attrs->max_hw_rq_quanta) return -EINVAL; return 0; } static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = { .iw_copy_inline_data = irdma_copy_inline_data, .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta, .iw_set_fragment = irdma_set_fragment, .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe, }; static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = { .iw_copy_inline_data = irdma_copy_inline_data_gen_1, .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1, .iw_set_fragment = irdma_set_fragment_gen_1, .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1, }; /** * irdma_setup_connection_wqes - setup WQEs necessary to complete * connection. * @qp: hw qp (user and kernel) * @info: qp initialization info */ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) { u16 move_cnt = 1; if (!info->legacy_mode && (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)) move_cnt = 3; qp->conn_wqes = move_cnt; IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt); IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt); IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt); } /** * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ * @ukinfo: qp initialization info * @sq_shift: Returns shift of SQ * @rq_shift: Returns shift of RQ */ void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift, u8 *rq_shift) { bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2; irdma_get_wqe_shift(ukinfo->uk_attrs, imm_support ? ukinfo->max_sq_frag_cnt + 1 : ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, sq_shift); irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0, rq_shift); if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) { if (ukinfo->abi_ver > 4) *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; } } /** * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size. 
* @ukinfo: qp initialization info * @sq_depth: Returns depth of SQ * @sq_shift: Returns shift of SQ */ int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo, u32 *sq_depth, u8 *sq_shift) { bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2; int status; irdma_get_wqe_shift(ukinfo->uk_attrs, imm_support ? ukinfo->max_sq_frag_cnt + 1 : ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, sq_shift); status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size, *sq_shift, sq_depth); return status; } /** * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size. * @ukinfo: qp initialization info * @rq_depth: Returns depth of RQ * @rq_shift: Returns shift of RQ */ int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo, u32 *rq_depth, u8 *rq_shift) { int status; irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0, rq_shift); if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) { if (ukinfo->abi_ver > 4) *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; } status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size, *rq_shift, rq_depth); return status; } /** * irdma_uk_qp_init - initialize shared qp * @qp: hw qp (user and kernel) * @info: qp initialization info * * initializes the vars used in both user and kernel mode. * size of the wqe depends on numbers of max. fragements * allowed. Then size of wqe * the number of wqes should be the * amount of memory allocated for sq and rq. */ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) { int ret_code = 0; u32 sq_ring_size; qp->uk_attrs = info->uk_attrs; if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags || info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags) return -EINVAL; qp->qp_caps = info->qp_caps; qp->sq_base = info->sq; qp->rq_base = info->rq; qp->qp_type = info->type ? 
info->type : IRDMA_QP_TYPE_IWARP; qp->shadow_area = info->shadow_area; qp->sq_wrtrk_array = info->sq_wrtrk_array; qp->rq_wrid_array = info->rq_wrid_array; qp->wqe_alloc_db = info->wqe_alloc_db; qp->qp_id = info->qp_id; qp->sq_size = info->sq_size; qp->max_sq_frag_cnt = info->max_sq_frag_cnt; sq_ring_size = qp->sq_size << info->sq_shift; IRDMA_RING_INIT(qp->sq_ring, sq_ring_size); IRDMA_RING_INIT(qp->initial_ring, sq_ring_size); if (info->first_sq_wq) { irdma_setup_connection_wqes(qp, info); qp->swqe_polarity = 1; qp->first_sq_wq = true; } else { qp->swqe_polarity = 0; } qp->swqe_polarity_deferred = 1; qp->rwqe_polarity = 0; qp->rq_size = info->rq_size; qp->max_rq_frag_cnt = info->max_rq_frag_cnt; qp->max_inline_data = info->max_inline_data; qp->rq_wqe_size = info->rq_shift; IRDMA_RING_INIT(qp->rq_ring, qp->rq_size); qp->rq_wqe_size_multiplier = 1 << info->rq_shift; if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) qp->wqe_ops = iw_wqe_uk_ops_gen_1; else qp->wqe_ops = iw_wqe_uk_ops; return ret_code; } /** * irdma_uk_cq_init - initialize shared cq (user and kernel) * @cq: hw cq * @info: hw cq initialization info */ void irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info) { cq->cq_base = info->cq_base; cq->cq_id = info->cq_id; cq->cq_size = info->cq_size; cq->cqe_alloc_db = info->cqe_alloc_db; cq->cq_ack_db = info->cq_ack_db; cq->shadow_area = info->shadow_area; cq->avoid_mem_cflct = info->avoid_mem_cflct; IRDMA_RING_INIT(cq->cq_ring, cq->cq_size); cq->polarity = 1; } /** * irdma_uk_clean_cq - clean cq entries * @q: completion context * @cq: cq to clean */ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq) { __le64 *cqe; u64 qword3, comp_ctx; u32 cq_head; u8 polarity, temp; cq_head = cq->cq_ring.head; temp = cq->polarity; do { if (cq->avoid_mem_cflct) cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf; else cqe = cq->cq_base[cq_head].buf; get_64bit_val(cqe, 24, &qword3); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); if (polarity != temp) break; /* Ensure CQE contents are read after valid bit is checked */ dma_rmb(); get_64bit_val(cqe, 8, &comp_ctx); if ((void *)(unsigned long)comp_ctx == q) set_64bit_val(cqe, 8, 0); cq_head = (cq_head + 1) % cq->cq_ring.size; if (!cq_head) temp ^= 1; } while (true); } /** * irdma_nop - post a nop * @qp: hw qp ptr * @wr_id: work request id * @signaled: signaled for completion * @post_sq: ring doorbell */ int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq) { __le64 *wqe; u64 hdr; u32 wqe_idx; struct irdma_post_sq_info info = {}; info.wr_id = wr_id; wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA, 0, &info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); set_64bit_val(wqe, 0, 0); set_64bit_val(wqe, 8, 0); set_64bit_val(wqe, 16, 0); hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ * @frag_cnt: number of fragments * @quanta: quanta for frag_cnt */ int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta) { switch (frag_cnt) { case 0: case 1: *quanta = IRDMA_QP_WQE_MIN_QUANTA; break; case 2: case 3: *quanta = 2; break; case 4: case 5: *quanta = 3; break; case 6: case 7: *quanta = 4; break; case 8: case 9: *quanta = 5; break; case 10: case 11: *quanta = 
6; break; case 12: case 13: *quanta = 7; break; case 14: case 15: /* when immediate data is present */ *quanta = 8; break; default: return -EINVAL; } return 0; } /** * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ * @frag_cnt: number of fragments * @wqe_size: size in bytes given frag_cnt */ int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size) { switch (frag_cnt) { case 0: case 1: *wqe_size = 32; break; case 2: case 3: *wqe_size = 64; break; case 4: case 5: case 6: case 7: *wqe_size = 128; break; case 8: case 9: case 10: case 11: case 12: case 13: case 14: *wqe_size = 256; break; default: return -EINVAL; } return 0; }
linux-master
drivers/infiniband/hw/irdma/uk.c
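A minimal standalone sketch (userspace C, not driver code) that cross-checks the threshold table in irdma_inline_data_size_to_quanta() above against the packing implied by irdma_copy_inline_data(): the first quantum carries 8 inline bytes and each additional quantum carries 31 (its last byte holds the inline valid/polarity marker). The names quanta_from_table, quanta_from_formula and MAX_INLINE are illustrative only; the GEN_1 helpers use a different 16/32-byte layout and are not modeled here.

#include <stdio.h>
#include <stdint.h>

#define MAX_INLINE 225 /* capacity of 8 quanta: 8 + 7 * 31 bytes */

/* Threshold table, mirroring the GEN_2 quanta helper shown above */
static uint16_t quanta_from_table(uint32_t ds)
{
	if (ds <= 8)
		return 1;
	else if (ds <= 39)
		return 2;
	else if (ds <= 70)
		return 3;
	else if (ds <= 101)
		return 4;
	else if (ds <= 132)
		return 5;
	else if (ds <= 163)
		return 6;
	else if (ds <= 194)
		return 7;
	return 8;
}

/* Closed form: first quantum holds 8 bytes, every further quantum holds 31 */
static uint16_t quanta_from_formula(uint32_t ds)
{
	if (ds <= 8)
		return 1;
	return 1 + (ds - 8 + 30) / 31; /* ceil((ds - 8) / 31) extra quanta */
}

int main(void)
{
	uint32_t ds;

	for (ds = 0; ds <= MAX_INLINE; ds++) {
		if (quanta_from_table(ds) != quanta_from_formula(ds)) {
			printf("mismatch at %u bytes\n", ds);
			return 1;
		}
	}
	printf("table and formula agree for 0..%u inline bytes\n", MAX_INLINE);
	return 0;
}

Running this prints a single agreement line, which is just a sanity check that the switch-style thresholds and the per-quantum byte capacities describe the same layout.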
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" #include "i40iw_hw.h" #include <linux/net/intel/i40e_client.h> static struct i40e_client i40iw_client; /** * i40iw_l2param_change - handle mss change * @cdev_info: parent lan device information structure with data/ops * @client: client for parameter change * @params: new parameters from L2 */ static void i40iw_l2param_change(struct i40e_info *cdev_info, struct i40e_client *client, struct i40e_params *params) { struct irdma_l2params l2params = {}; struct irdma_device *iwdev; struct ib_device *ibdev; ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA); if (!ibdev) return; iwdev = to_iwdev(ibdev); if (iwdev->vsi.mtu != params->mtu) { l2params.mtu_changed = true; l2params.mtu = params->mtu; } irdma_change_l2params(&iwdev->vsi, &l2params); ib_device_put(ibdev); } /** * i40iw_close - client interface operation close for iwarp/uda device * @cdev_info: parent lan device information structure with data/ops * @client: client to close * @reset: flag to indicate close on reset * * Called by the lan driver during the processing of client unregister * Destroy and clean up the driver resources */ static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client, bool reset) { struct irdma_device *iwdev; struct ib_device *ibdev; ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA); if (WARN_ON(!ibdev)) return; iwdev = to_iwdev(ibdev); if (reset) iwdev->rf->reset = true; iwdev->iw_status = 0; irdma_port_ibevent(iwdev); ib_unregister_device_and_put(ibdev); pr_debug("INIT: Gen1 PF[%d] close complete\n", PCI_FUNC(cdev_info->pcidev->devfn)); } static void i40iw_request_reset(struct irdma_pci_f *rf) { struct i40e_info *cdev_info = rf->cdev; cdev_info->ops->request_reset(cdev_info, &i40iw_client, 1); } static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info *cdev_info) { struct irdma_pci_f *rf = iwdev->rf; rf->rdma_ver = IRDMA_GEN_1; rf->gen_ops.request_reset = i40iw_request_reset; rf->pcidev = cdev_info->pcidev; rf->pf_id = cdev_info->fid; rf->hw.hw_addr = cdev_info->hw_addr; rf->cdev = cdev_info; rf->msix_count = cdev_info->msix_count; rf->msix_entries = cdev_info->msix_entries; rf->limits_sel = 5; rf->protocol_used = IRDMA_IWARP_PROTOCOL_ONLY; rf->iwdev = iwdev; iwdev->init_state = INITIAL_STATE; iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED; iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE; iwdev->netdev = cdev_info->netdev; iwdev->vsi_num = 0; } /** * i40iw_open - client interface operation open for iwarp/uda device * @cdev_info: parent lan device information structure with data/ops * @client: iwarp client information, provided during registration * * Called by the lan driver during the processing of client register * Create device resources, set up queues, pble and hmc objects and * register the device with the ib verbs interface * Return 0 if successful, otherwise return error */ static int i40iw_open(struct i40e_info *cdev_info, struct i40e_client *client) { struct irdma_l2params l2params = {}; struct irdma_device *iwdev; struct irdma_pci_f *rf; int err = -EIO; int i; u16 qset; u16 last_qset = IRDMA_NO_QSET; iwdev = ib_alloc_device(irdma_device, ibdev); if (!iwdev) return -ENOMEM; iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL); if (!iwdev->rf) { ib_dealloc_device(&iwdev->ibdev); return -ENOMEM; } i40iw_fill_device_info(iwdev, cdev_info); rf = iwdev->rf; if (irdma_ctrl_init_hw(rf)) { err = -EIO; goto 
err_ctrl_init; } l2params.mtu = (cdev_info->params.mtu) ? cdev_info->params.mtu : IRDMA_DEFAULT_MTU; for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) { qset = cdev_info->params.qos.prio_qos[i].qs_handle; l2params.up2tc[i] = cdev_info->params.qos.prio_qos[i].tc; l2params.qs_handle_list[i] = qset; if (last_qset == IRDMA_NO_QSET) last_qset = qset; else if ((qset != last_qset) && (qset != IRDMA_NO_QSET)) iwdev->dcb_vlan_mode = true; } if (irdma_rt_init_hw(iwdev, &l2params)) { err = -EIO; goto err_rt_init; } err = irdma_ib_register_device(iwdev); if (err) goto err_ibreg; ibdev_dbg(&iwdev->ibdev, "INIT: Gen1 PF[%d] open success\n", PCI_FUNC(rf->pcidev->devfn)); return 0; err_ibreg: irdma_rt_deinit_hw(iwdev); err_rt_init: irdma_ctrl_deinit_hw(rf); err_ctrl_init: kfree(iwdev->rf); ib_dealloc_device(&iwdev->ibdev); return err; } /* client interface functions */ static const struct i40e_client_ops i40e_ops = { .open = i40iw_open, .close = i40iw_close, .l2_param_change = i40iw_l2param_change }; static struct i40e_client i40iw_client = { .ops = &i40e_ops, .type = I40E_CLIENT_IWARP, }; static int i40iw_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id) { struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev, struct i40e_auxiliary_device, aux_dev); struct i40e_info *cdev_info = i40e_adev->ldev; strncpy(i40iw_client.name, "irdma", I40E_CLIENT_STR_LENGTH); i40e_client_device_register(cdev_info, &i40iw_client); return 0; } static void i40iw_remove(struct auxiliary_device *aux_dev) { struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev, struct i40e_auxiliary_device, aux_dev); struct i40e_info *cdev_info = i40e_adev->ldev; i40e_client_device_unregister(cdev_info); } static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = { {.name = "i40e.iwarp", }, {}, }; MODULE_DEVICE_TABLE(auxiliary, i40iw_auxiliary_id_table); struct auxiliary_driver i40iw_auxiliary_drv = { .name = "gen_1", .id_table = i40iw_auxiliary_id_table, .probe = i40iw_probe, .remove = i40iw_remove, };
linux-master
drivers/infiniband/hw/irdma/i40iw_if.c
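A hedged, hypothetical sketch of how an auxiliary driver such as i40iw_auxiliary_drv above is typically wired into module init/exit through the auxiliary bus API. The irdma driver's real registration lives in its other module sources and may differ; the function names example_irdma_init/example_irdma_exit and the extern declaration are assumptions made only for this sketch.

#include <linux/module.h>
#include <linux/auxiliary_bus.h>

/* In the real driver this is declared in a shared header (assumed here). */
extern struct auxiliary_driver i40iw_auxiliary_drv;

static int __init example_irdma_init(void)
{
	/* Bind to "i40e.iwarp" auxiliary devices exposed by the i40e LAN driver */
	return auxiliary_driver_register(&i40iw_auxiliary_drv);
}

static void __exit example_irdma_exit(void)
{
	auxiliary_driver_unregister(&i40iw_auxiliary_drv);
}

module_init(example_irdma_init);
module_exit(example_irdma_exit);
MODULE_LICENSE("Dual BSD/GPL");

The point of the auxiliary-bus split is that the LAN driver (i40e) creates the "i40e.iwarp" auxiliary device, and the RDMA side only has to register a driver whose id_table matches that name; probe/remove then call back into the i40e client interface shown above.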
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "hmc.h" #include "defs.h" #include "type.h" #include "protos.h" #include "puda.h" #include "ws.h" static void irdma_ieq_receive(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf); static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid); static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf, u32 wqe_idx); /** * irdma_puda_get_listbuf - get buffer from puda list * @list: list to use for buffers (ILQ or IEQ) */ static struct irdma_puda_buf *irdma_puda_get_listbuf(struct list_head *list) { struct irdma_puda_buf *buf = NULL; if (!list_empty(list)) { buf = (struct irdma_puda_buf *)list->next; list_del((struct list_head *)&buf->list); } return buf; } /** * irdma_puda_get_bufpool - return buffer from resource * @rsrc: resource to use for buffer */ struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc) { struct irdma_puda_buf *buf = NULL; struct list_head *list = &rsrc->bufpool; unsigned long flags; spin_lock_irqsave(&rsrc->bufpool_lock, flags); buf = irdma_puda_get_listbuf(list); if (buf) { rsrc->avail_buf_count--; buf->vsi = rsrc->vsi; } else { rsrc->stats_buf_alloc_fail++; } spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); return buf; } /** * irdma_puda_ret_bufpool - return buffer to rsrc list * @rsrc: resource to use for buffer * @buf: buffer to return to resource */ void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc, struct irdma_puda_buf *buf) { unsigned long flags; buf->do_lpb = false; spin_lock_irqsave(&rsrc->bufpool_lock, flags); list_add(&buf->list, &rsrc->bufpool); spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); rsrc->avail_buf_count++; } /** * irdma_puda_post_recvbuf - set wqe for rcv buffer * @rsrc: resource ptr * @wqe_idx: wqe index to use * @buf: puda buffer for rcv q * @initial: flag if during init time */ static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx, struct irdma_puda_buf *buf, bool initial) { __le64 *wqe; struct irdma_sc_qp *qp = &rsrc->qp; u64 offset24 = 0; /* Synch buffer for use by device */ dma_sync_single_for_device(rsrc->dev->hw->device, buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL); qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf; wqe = qp->qp_uk.rq_base[wqe_idx].elem; if (!initial) get_64bit_val(wqe, 24, &offset24); offset24 = (offset24) ? 
0 : FIELD_PREP(IRDMAQPSQ_VALID, 1); set_64bit_val(wqe, 16, 0); set_64bit_val(wqe, 0, buf->mem.pa); if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size)); } else { set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) | offset24); } dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, offset24); } /** * irdma_puda_replenish_rq - post rcv buffers * @rsrc: resource to use for buffer * @initial: flag if during init time */ static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial) { u32 i; u32 invalid_cnt = rsrc->rxq_invalid_cnt; struct irdma_puda_buf *buf = NULL; for (i = 0; i < invalid_cnt; i++) { buf = irdma_puda_get_bufpool(rsrc); if (!buf) return -ENOBUFS; irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial); rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size); rsrc->rxq_invalid_cnt--; } return 0; } /** * irdma_puda_alloc_buf - allocate mem for buffer * @dev: iwarp device * @len: length of buffer */ static struct irdma_puda_buf *irdma_puda_alloc_buf(struct irdma_sc_dev *dev, u32 len) { struct irdma_puda_buf *buf; struct irdma_virt_mem buf_mem; buf_mem.size = sizeof(struct irdma_puda_buf); buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL); if (!buf_mem.va) return NULL; buf = buf_mem.va; buf->mem.size = len; buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL); if (!buf->mem.va) goto free_virt; buf->mem.pa = dma_map_single(dev->hw->device, buf->mem.va, buf->mem.size, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev->hw->device, buf->mem.pa)) { kfree(buf->mem.va); goto free_virt; } buf->buf_mem.va = buf_mem.va; buf->buf_mem.size = buf_mem.size; return buf; free_virt: kfree(buf_mem.va); return NULL; } /** * irdma_puda_dele_buf - delete buffer back to system * @dev: iwarp device * @buf: buffer to free */ static void irdma_puda_dele_buf(struct irdma_sc_dev *dev, struct irdma_puda_buf *buf) { dma_unmap_single(dev->hw->device, buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL); kfree(buf->mem.va); kfree(buf->buf_mem.va); } /** * irdma_puda_get_next_send_wqe - return next wqe for processing * @qp: puda qp for wqe * @wqe_idx: wqe index for caller */ static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx) { int ret_code = 0; *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); if (!*wqe_idx) qp->swqe_polarity = !qp->swqe_polarity; IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code); if (ret_code) return NULL; return qp->sq_base[*wqe_idx].elem; } /** * irdma_puda_poll_info - poll cq for completion * @cq: cq for poll * @info: info return for successful completion */ static int irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info) { struct irdma_cq_uk *cq_uk = &cq->cq_uk; u64 qword0, qword2, qword3, qword6; __le64 *cqe; __le64 *ext_cqe = NULL; u64 qword7 = 0; u64 comp_ctx; bool valid_bit; bool ext_valid = 0; u32 major_err, minor_err; u32 peek_head; bool error; u8 polarity; cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk); get_64bit_val(cqe, 24, &qword3); valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3); if (valid_bit != cq_uk->polarity) return -ENOENT; /* Ensure CQE contents are read after valid bit is checked */ dma_rmb(); if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3); if (ext_valid) { peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size; ext_cqe = cq_uk->cq_base[peek_head].buf; get_64bit_val(ext_cqe, 24, &qword7); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, 
qword7); if (!peek_head) polarity ^= 1; if (polarity != cq_uk->polarity) return -ENOENT; /* Ensure ext CQE contents are read after ext valid bit is checked */ dma_rmb(); IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring); if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring)) cq_uk->polarity = !cq_uk->polarity; /* update cq tail in cq shadow memory also */ IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring); } print_hex_dump_debug("PUDA: PUDA CQE", DUMP_PREFIX_OFFSET, 16, 8, cqe, 32, false); if (ext_valid) print_hex_dump_debug("PUDA: PUDA EXT-CQE", DUMP_PREFIX_OFFSET, 16, 8, ext_cqe, 32, false); error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3); if (error) { ibdev_dbg(to_ibdev(cq->dev), "PUDA: receive error\n"); major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3)); minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3)); info->compl_error = major_err << 16 | minor_err; return -EIO; } get_64bit_val(cqe, 0, &qword0); get_64bit_val(cqe, 16, &qword2); info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3); info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2); if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3); get_64bit_val(cqe, 8, &comp_ctx); info->qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx; info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3); if (info->q_type == IRDMA_CQE_QTYPE_RQ) { if (ext_valid) { info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7); if (info->vlan_valid) { get_64bit_val(ext_cqe, 16, &qword6); info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6); } info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7); if (info->smac_valid) { get_64bit_val(ext_cqe, 16, &qword6); info->smac[0] = (u8)((qword6 >> 40) & 0xFF); info->smac[1] = (u8)((qword6 >> 32) & 0xFF); info->smac[2] = (u8)((qword6 >> 24) & 0xFF); info->smac[3] = (u8)((qword6 >> 16) & 0xFF); info->smac[4] = (u8)((qword6 >> 8) & 0xFF); info->smac[5] = (u8)(qword6 & 0xFF); } } if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) { info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3); info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2); info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2); } info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0); } return 0; } /** * irdma_puda_poll_cmpl - processes completion for cq * @dev: iwarp device * @cq: cq getting interrupt * @compl_err: return any completion err */ int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq, u32 *compl_err) { struct irdma_qp_uk *qp; struct irdma_cq_uk *cq_uk = &cq->cq_uk; struct irdma_puda_cmpl_info info = {}; int ret = 0; struct irdma_puda_buf *buf; struct irdma_puda_rsrc *rsrc; u8 cq_type = cq->cq_type; unsigned long flags; if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) { rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? 
cq->vsi->ilq : cq->vsi->ieq; } else { ibdev_dbg(to_ibdev(dev), "PUDA: qp_type error\n"); return -EINVAL; } ret = irdma_puda_poll_info(cq, &info); *compl_err = info.compl_error; if (ret == -ENOENT) return ret; if (ret) goto done; qp = info.qp; if (!qp || !rsrc) { ret = -EFAULT; goto done; } if (qp->qp_id != rsrc->qp_id) { ret = -EFAULT; goto done; } if (info.q_type == IRDMA_CQE_QTYPE_RQ) { buf = (struct irdma_puda_buf *)(uintptr_t) qp->rq_wrid_array[info.wqe_idx]; /* reusing so synch the buffer for CPU use */ dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL); /* Get all the tcpip information in the buf header */ ret = irdma_puda_get_tcpip_info(&info, buf); if (ret) { rsrc->stats_rcvd_pkt_err++; if (cq_type == IRDMA_CQ_TYPE_ILQ) { irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx); } else { irdma_puda_ret_bufpool(rsrc, buf); irdma_puda_replenish_rq(rsrc, false); } goto done; } rsrc->stats_pkt_rcvd++; rsrc->compl_rxwqe_idx = info.wqe_idx; ibdev_dbg(to_ibdev(dev), "PUDA: RQ completion\n"); rsrc->receive(rsrc->vsi, buf); if (cq_type == IRDMA_CQ_TYPE_ILQ) irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx); else irdma_puda_replenish_rq(rsrc, false); } else { ibdev_dbg(to_ibdev(dev), "PUDA: SQ completion\n"); buf = (struct irdma_puda_buf *)(uintptr_t) qp->sq_wrtrk_array[info.wqe_idx].wrid; /* reusing so synch the buffer for CPU use */ dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL); IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx); rsrc->xmit_complete(rsrc->vsi, buf); spin_lock_irqsave(&rsrc->bufpool_lock, flags); rsrc->tx_wqe_avail_cnt++; spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); if (!list_empty(&rsrc->txpend)) irdma_puda_send_buf(rsrc, NULL); } done: IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring); if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring)) cq_uk->polarity = !cq_uk->polarity; /* update cq tail in cq shadow memory also */ IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring); set_64bit_val(cq_uk->shadow_area, 0, IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring)); return ret; } /** * irdma_puda_send - complete send wqe for transmit * @qp: puda qp for send * @info: buffer information for transmit */ int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info) { __le64 *wqe; u32 iplen, l4len; u64 hdr[2]; u32 wqe_idx; u8 iipt; /* number of 32 bits DWORDS in header */ l4len = info->tcplen >> 2; if (info->ipv4) { iipt = 3; iplen = 5; } else { iipt = 1; iplen = 10; } wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx); if (!wqe) return -ENOMEM; qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch; /* Third line of WQE descriptor */ /* maclen is in words */ if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */ hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) | FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) | FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) | FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) | FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity); /* Forth line of WQE descriptor */ set_64bit_val(wqe, 0, info->paddr); set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) | FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity)); } else { hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) | FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) | FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) | FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) | FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len); hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) | 
FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) | FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) | FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity); /* Forth line of WQE descriptor */ set_64bit_val(wqe, 0, info->paddr); set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len)); } set_64bit_val(wqe, 16, hdr[0]); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr[1]); print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, 32, false); irdma_uk_qp_post_wr(&qp->qp_uk); return 0; } /** * irdma_puda_send_buf - transmit puda buffer * @rsrc: resource to use for buffer * @buf: puda buffer to transmit */ void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc, struct irdma_puda_buf *buf) { struct irdma_puda_send_info info; int ret = 0; unsigned long flags; spin_lock_irqsave(&rsrc->bufpool_lock, flags); /* if no wqe available or not from a completion and we have * pending buffers, we must queue new buffer */ if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) { list_add_tail(&buf->list, &rsrc->txpend); spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); rsrc->stats_sent_pkt_q++; if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: adding to txpend\n"); return; } rsrc->tx_wqe_avail_cnt--; /* if we are coming from a completion and have pending buffers * then Get one from pending list */ if (!buf) { buf = irdma_puda_get_listbuf(&rsrc->txpend); if (!buf) goto done; } info.scratch = buf; info.paddr = buf->mem.pa; info.len = buf->totallen; info.tcplen = buf->tcphlen; info.ipv4 = buf->ipv4; if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { info.ah_id = buf->ah_id; } else { info.maclen = buf->maclen; info.do_lpb = buf->do_lpb; } /* Synch buffer for use by device */ dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL); ret = irdma_puda_send(&rsrc->qp, &info); if (ret) { rsrc->tx_wqe_avail_cnt++; rsrc->stats_sent_pkt_q++; list_add(&buf->list, &rsrc->txpend); if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: adding to puda_send\n"); } else { rsrc->stats_pkt_sent++; } done: spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); } /** * irdma_puda_qp_setctx - during init, set qp's context * @rsrc: qp's resource */ static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc) { struct irdma_sc_qp *qp = &rsrc->qp; __le64 *qp_ctx = qp->hw_host_ctx; set_64bit_val(qp_ctx, 8, qp->sq_pa); set_64bit_val(qp_ctx, 16, qp->rq_pa); set_64bit_val(qp_ctx, 24, FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) | FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size)); set_64bit_val(qp_ctx, 48, FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size)); set_64bit_val(qp_ctx, 56, 0); if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) set_64bit_val(qp_ctx, 64, 1); set_64bit_val(qp_ctx, 136, FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) | FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id)); set_64bit_val(qp_ctx, 144, FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx)); set_64bit_val(qp_ctx, 160, FIELD_PREP(IRDMAQPC_PRIVEN, 1) | FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid)); set_64bit_val(qp_ctx, 168, FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp)); set_64bit_val(qp_ctx, 176, FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) | FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) | FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle)); print_hex_dump_debug("PUDA: PUDA QP CONTEXT", DUMP_PREFIX_OFFSET, 16, 8, qp_ctx, IRDMA_QP_CTX_SIZE, false); } /** * irdma_puda_qp_wqe - setup wqe for 
qp create * @dev: Device * @qp: Resource qp */ static int irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) { struct irdma_sc_cqp *cqp; __le64 *wqe; u64 hdr; struct irdma_ccq_cqe_info compl_info; int status = 0; cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); set_64bit_val(wqe, 40, qp->shadow_area_pa); hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) | FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) | FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) | FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("PUDA: PUDA QP CREATE", DUMP_PREFIX_OFFSET, 16, 8, wqe, 40, false); irdma_sc_cqp_post_sq(cqp); status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP, &compl_info); return status; } /** * irdma_puda_qp_create - create qp for resource * @rsrc: resource to use for buffer */ static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc) { struct irdma_sc_qp *qp = &rsrc->qp; struct irdma_qp_uk *ukqp = &qp->qp_uk; int ret = 0; u32 sq_size, rq_size; struct irdma_dma_mem *mem; sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE; rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE; rsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE), IRDMA_HW_PAGE_SIZE); rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device, rsrc->qpmem.size, &rsrc->qpmem.pa, GFP_KERNEL); if (!rsrc->qpmem.va) return -ENOMEM; mem = &rsrc->qpmem; memset(mem->va, 0, rsrc->qpmem.size); qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ); qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ); qp->pd = &rsrc->sc_pd; qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA; qp->dev = rsrc->dev; qp->qp_uk.back_qp = rsrc; qp->sq_pa = mem->pa; qp->rq_pa = qp->sq_pa + sq_size; qp->vsi = rsrc->vsi; ukqp->sq_base = mem->va; ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size]; ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem; ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs; qp->shadow_area_pa = qp->rq_pa + rq_size; qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE; qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3); qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX; ukqp->qp_id = rsrc->qp_id; ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array; ukqp->rq_wrid_array = rsrc->rq_wrid_array; ukqp->sq_size = rsrc->sq_size; ukqp->rq_size = rsrc->rq_size; IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size); IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size); IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size); ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db; ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri); if (ret) { dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size, rsrc->qpmem.va, rsrc->qpmem.pa); rsrc->qpmem.va = NULL; return ret; } irdma_qp_add_qos(qp); irdma_puda_qp_setctx(rsrc); if (rsrc->dev->ceq_valid) ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp); else ret = irdma_puda_qp_wqe(rsrc->dev, qp); if (ret) { irdma_qp_rem_qos(qp); rsrc->dev->ws_remove(qp->vsi, qp->user_pri); dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size, rsrc->qpmem.va, rsrc->qpmem.pa); rsrc->qpmem.va = NULL; } return ret; } /** * irdma_puda_cq_wqe - setup wqe for CQ create * @dev: Device * @cq: resource for cq */ static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq 
*cq) { __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; struct irdma_ccq_cqe_info compl_info; int status = 0; cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0); if (!wqe) return -ENOMEM; set_64bit_val(wqe, 0, cq->cq_uk.cq_size); set_64bit_val(wqe, 8, (uintptr_t)cq >> 1); set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold)); set_64bit_val(wqe, 32, cq->cq_pa); set_64bit_val(wqe, 40, cq->shadow_area_pa); set_64bit_val(wqe, 56, FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) | FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx)); hdr = cq->cq_uk.cq_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) | FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) | FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) | FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) | FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity); dma_wmb(); /* make sure WQE is written before valid bit is set */ set_64bit_val(wqe, 24, hdr); print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); irdma_sc_cqp_post_sq(dev->cqp); status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ, &compl_info); if (!status) { struct irdma_sc_ceq *ceq = dev->ceq[0]; if (ceq && ceq->reg_cq) status = irdma_sc_add_cq_ctx(ceq, cq); } return status; } /** * irdma_puda_cq_create - create cq for resource * @rsrc: resource for which cq to create */ static int irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc) { struct irdma_sc_dev *dev = rsrc->dev; struct irdma_sc_cq *cq = &rsrc->cq; int ret = 0; u32 cqsize; struct irdma_dma_mem *mem; struct irdma_cq_init_info info = {}; struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info; cq->vsi = rsrc->vsi; cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe)); rsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area), IRDMA_CQ0_ALIGNMENT); rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size, &rsrc->cqmem.pa, GFP_KERNEL); if (!rsrc->cqmem.va) return -ENOMEM; mem = &rsrc->cqmem; info.dev = dev; info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ? 
IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ; info.shadow_read_threshold = rsrc->cq_size >> 2; info.cq_base_pa = mem->pa; info.shadow_area_pa = mem->pa + cqsize; init_info->cq_base = mem->va; init_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize); init_info->cq_size = rsrc->cq_size; init_info->cq_id = rsrc->cq_id; info.ceqe_mask = true; info.ceq_id_valid = true; info.vsi = rsrc->vsi; ret = irdma_sc_cq_init(cq, &info); if (ret) goto error; if (rsrc->dev->ceq_valid) ret = irdma_cqp_cq_create_cmd(dev, cq); else ret = irdma_puda_cq_wqe(dev, cq); error: if (ret) { dma_free_coherent(dev->hw->device, rsrc->cqmem.size, rsrc->cqmem.va, rsrc->cqmem.pa); rsrc->cqmem.va = NULL; } return ret; } /** * irdma_puda_free_qp - free qp for resource * @rsrc: resource for which qp to free */ static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc) { int ret; struct irdma_ccq_cqe_info compl_info; struct irdma_sc_dev *dev = rsrc->dev; if (rsrc->dev->ceq_valid) { irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp); rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri); return; } ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true); if (ret) ibdev_dbg(to_ibdev(dev), "PUDA: error puda qp destroy wqe, status = %d\n", ret); if (!ret) { ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP, &compl_info); if (ret) ibdev_dbg(to_ibdev(dev), "PUDA: error puda qp destroy failed, status = %d\n", ret); } rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri); } /** * irdma_puda_free_cq - free cq for resource * @rsrc: resource for which cq to free */ static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc) { int ret; struct irdma_ccq_cqe_info compl_info; struct irdma_sc_dev *dev = rsrc->dev; if (rsrc->dev->ceq_valid) { irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq); return; } ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true); if (ret) ibdev_dbg(to_ibdev(dev), "PUDA: error ieq cq destroy\n"); if (!ret) { ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ, &compl_info); if (ret) ibdev_dbg(to_ibdev(dev), "PUDA: error ieq qp destroy done\n"); } } /** * irdma_puda_dele_rsrc - delete all resources during close * @vsi: VSI structure of device * @type: type of resource to dele * @reset: true if reset chip */ void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type, bool reset) { struct irdma_sc_dev *dev = vsi->dev; struct irdma_puda_rsrc *rsrc; struct irdma_puda_buf *buf = NULL; struct irdma_puda_buf *nextbuf = NULL; struct irdma_virt_mem *vmem; struct irdma_sc_ceq *ceq; ceq = vsi->dev->ceq[0]; switch (type) { case IRDMA_PUDA_RSRC_TYPE_ILQ: rsrc = vsi->ilq; vmem = &vsi->ilq_mem; vsi->ilq = NULL; if (ceq && ceq->reg_cq) irdma_sc_remove_cq_ctx(ceq, &rsrc->cq); break; case IRDMA_PUDA_RSRC_TYPE_IEQ: rsrc = vsi->ieq; vmem = &vsi->ieq_mem; vsi->ieq = NULL; if (ceq && ceq->reg_cq) irdma_sc_remove_cq_ctx(ceq, &rsrc->cq); break; default: ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n", type); return; } switch (rsrc->cmpl) { case PUDA_HASH_CRC_COMPLETE: irdma_free_hash_desc(rsrc->hash_desc); fallthrough; case PUDA_QP_CREATED: irdma_qp_rem_qos(&rsrc->qp); if (!reset) irdma_puda_free_qp(rsrc); dma_free_coherent(dev->hw->device, rsrc->qpmem.size, rsrc->qpmem.va, rsrc->qpmem.pa); rsrc->qpmem.va = NULL; fallthrough; case PUDA_CQ_CREATED: if (!reset) irdma_puda_free_cq(rsrc); dma_free_coherent(dev->hw->device, rsrc->cqmem.size, rsrc->cqmem.va, rsrc->cqmem.pa); rsrc->cqmem.va = NULL; break; default: ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n"); break; } /* Free all allocated 
puda buffers for both tx and rx */ buf = rsrc->alloclist; while (buf) { nextbuf = buf->next; irdma_puda_dele_buf(dev, buf); buf = nextbuf; rsrc->alloc_buf_count--; } kfree(vmem->va); } /** * irdma_puda_allocbufs - allocate buffers for resource * @rsrc: resource for buffer allocation * @count: number of buffers to create */ static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count) { u32 i; struct irdma_puda_buf *buf; struct irdma_puda_buf *nextbuf; for (i = 0; i < count; i++) { buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size); if (!buf) { rsrc->stats_buf_alloc_fail++; return -ENOMEM; } irdma_puda_ret_bufpool(rsrc, buf); rsrc->alloc_buf_count++; if (!rsrc->alloclist) { rsrc->alloclist = buf; } else { nextbuf = rsrc->alloclist; rsrc->alloclist = buf; buf->next = nextbuf; } } rsrc->avail_buf_count = rsrc->alloc_buf_count; return 0; } /** * irdma_puda_create_rsrc - create resource (ilq or ieq) * @vsi: sc VSI struct * @info: resource information */ int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi, struct irdma_puda_rsrc_info *info) { struct irdma_sc_dev *dev = vsi->dev; int ret = 0; struct irdma_puda_rsrc *rsrc; u32 pudasize; u32 sqwridsize, rqwridsize; struct irdma_virt_mem *vmem; info->count = 1; pudasize = sizeof(struct irdma_puda_rsrc); sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info); rqwridsize = info->rq_size * 8; switch (info->type) { case IRDMA_PUDA_RSRC_TYPE_ILQ: vmem = &vsi->ilq_mem; break; case IRDMA_PUDA_RSRC_TYPE_IEQ: vmem = &vsi->ieq_mem; break; default: return -EOPNOTSUPP; } vmem->size = pudasize + sqwridsize + rqwridsize; vmem->va = kzalloc(vmem->size, GFP_KERNEL); if (!vmem->va) return -ENOMEM; rsrc = vmem->va; spin_lock_init(&rsrc->bufpool_lock); switch (info->type) { case IRDMA_PUDA_RSRC_TYPE_ILQ: vsi->ilq = vmem->va; vsi->ilq_count = info->count; rsrc->receive = info->receive; rsrc->xmit_complete = info->xmit_complete; break; case IRDMA_PUDA_RSRC_TYPE_IEQ: vsi->ieq_count = info->count; vsi->ieq = vmem->va; rsrc->receive = irdma_ieq_receive; rsrc->xmit_complete = irdma_ieq_tx_compl; break; default: return -EOPNOTSUPP; } rsrc->type = info->type; rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *) ((u8 *)vmem->va + pudasize); rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize); /* Initialize all ieq lists */ INIT_LIST_HEAD(&rsrc->bufpool); INIT_LIST_HEAD(&rsrc->txpend); rsrc->tx_wqe_avail_cnt = info->sq_size - 1; irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver); rsrc->qp_id = info->qp_id; rsrc->cq_id = info->cq_id; rsrc->sq_size = info->sq_size; rsrc->rq_size = info->rq_size; rsrc->cq_size = info->rq_size + info->sq_size; if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) rsrc->cq_size += info->rq_size; } rsrc->buf_size = info->buf_size; rsrc->dev = dev; rsrc->vsi = vsi; rsrc->stats_idx = info->stats_idx; rsrc->stats_idx_valid = info->stats_idx_valid; ret = irdma_puda_cq_create(rsrc); if (!ret) { rsrc->cmpl = PUDA_CQ_CREATED; ret = irdma_puda_qp_create(rsrc); } if (ret) { ibdev_dbg(to_ibdev(dev), "PUDA: error qp_create type=%d, status=%d\n", rsrc->type, ret); goto error; } rsrc->cmpl = PUDA_QP_CREATED; ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size); if (ret) { ibdev_dbg(to_ibdev(dev), "PUDA: error alloc_buf\n"); goto error; } rsrc->rxq_invalid_cnt = info->rq_size; ret = irdma_puda_replenish_rq(rsrc, true); if (ret) goto error; if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) { if (!irdma_init_hash_desc(&rsrc->hash_desc)) { rsrc->check_crc 
= true; rsrc->cmpl = PUDA_HASH_CRC_COMPLETE; ret = 0; } } irdma_sc_ccq_arm(&rsrc->cq); return ret; error: irdma_puda_dele_rsrc(vsi, info->type, false); return ret; } /** * irdma_ilq_putback_rcvbuf - ilq buffer to put back on rq * @qp: ilq's qp resource * @buf: puda buffer for rcv q * @wqe_idx: wqe index of completed rcvbuf */ static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf, u32 wqe_idx) { __le64 *wqe; u64 offset8, offset24; /* Synch buffer for use by device */ dma_sync_single_for_device(qp->dev->hw->device, buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL); wqe = qp->qp_uk.rq_base[wqe_idx].elem; get_64bit_val(wqe, 24, &offset24); if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { get_64bit_val(wqe, 8, &offset8); if (offset24) offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1); else offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1); set_64bit_val(wqe, 8, offset8); dma_wmb(); /* make sure WQE is written before valid bit is set */ } if (offset24) offset24 = 0; else offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1); set_64bit_val(wqe, 24, offset24); } /** * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker * @pfpdu: pointer to fpdu * @datap: pointer to data in the buffer * @rcv_seq: seqnum of the data buffer */ static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap, u32 rcv_seq) { u32 marker_seq, end_seq, blk_start; u8 marker_len = pfpdu->marker_len; u16 total_len = 0; u16 fpdu_len; blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1); if (!blk_start) { total_len = marker_len; marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ; if (marker_len && *(u32 *)datap) return 0; } else { marker_seq = rcv_seq + blk_start; } datap += total_len; fpdu_len = ntohs(*(__be16 *)datap); fpdu_len += IRDMA_IEQ_MPA_FRAMING; fpdu_len = (fpdu_len + 3) & 0xfffc; if (fpdu_len > pfpdu->max_fpdu_data) return 0; total_len += fpdu_len; end_seq = rcv_seq + total_len; while ((int)(marker_seq - end_seq) < 0) { total_len += marker_len; end_seq += marker_len; marker_seq += IRDMA_MRK_BLK_SZ; } return total_len; } /** * irdma_ieq_copy_to_txbuf - copydata from rcv buf to tx buf * @buf: rcv buffer with partial * @txbuf: tx buffer for sending back * @buf_offset: rcv buffer offset to copy from * @txbuf_offset: at offset in tx buf to copy * @len: length of data to copy */ static void irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf, struct irdma_puda_buf *txbuf, u16 buf_offset, u32 txbuf_offset, u32 len) { void *mem1 = (u8 *)buf->mem.va + buf_offset; void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset; memcpy(mem2, mem1, len); } /** * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling * @buf: reeive buffer with partial * @txbuf: buffer to prepare */ static void irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf, struct irdma_puda_buf *txbuf) { txbuf->tcphlen = buf->tcphlen; txbuf->ipv4 = buf->ipv4; if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { txbuf->hdrlen = txbuf->tcphlen; irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0, txbuf->hdrlen); } else { txbuf->maclen = buf->maclen; txbuf->hdrlen = buf->hdrlen; irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen); } } /** * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range * @buf: receive exception buffer * @fps: first partial sequence number */ static void irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps) { u32 offset; if (buf->seqnum < fps) { offset = fps - buf->seqnum; if (offset > buf->datalen) return; buf->data += offset; buf->datalen -= (u16)offset; buf->seqnum = 
fps; } } /** * irdma_ieq_compl_pfpdu - write txbuf with full fpdu * @ieq: ieq resource * @rxlist: ieq's received buffer list * @pbufl: temporary list for buffers for fpddu * @txbuf: tx buffer for fpdu * @fpdu_len: total length of fpdu */ static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq, struct list_head *rxlist, struct list_head *pbufl, struct irdma_puda_buf *txbuf, u16 fpdu_len) { struct irdma_puda_buf *buf; u32 nextseqnum; u16 txoffset, bufoffset; buf = irdma_puda_get_listbuf(pbufl); if (!buf) return; nextseqnum = buf->seqnum + fpdu_len; irdma_ieq_setup_tx_buf(buf, txbuf); if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { txoffset = txbuf->hdrlen; txbuf->totallen = txbuf->hdrlen + fpdu_len; txbuf->data = (u8 *)txbuf->mem.va + txoffset; } else { txoffset = buf->hdrlen; txbuf->totallen = buf->hdrlen + fpdu_len; txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen; } bufoffset = (u16)(buf->data - (u8 *)buf->mem.va); do { if (buf->datalen >= fpdu_len) { /* copied full fpdu */ irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len); buf->datalen -= fpdu_len; buf->data += fpdu_len; buf->seqnum = nextseqnum; break; } /* copy partial fpdu */ irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen); txoffset += buf->datalen; fpdu_len -= buf->datalen; irdma_puda_ret_bufpool(ieq, buf); buf = irdma_puda_get_listbuf(pbufl); if (!buf) return; bufoffset = (u16)(buf->data - (u8 *)buf->mem.va); } while (1); /* last buffer on the list*/ if (buf->datalen) list_add(&buf->list, rxlist); else irdma_puda_ret_bufpool(ieq, buf); } /** * irdma_ieq_create_pbufl - create buffer list for single fpdu * @pfpdu: pointer to fpdu * @rxlist: resource list for receive ieq buffes * @pbufl: temp. list for buffers for fpddu * @buf: first receive buffer * @fpdu_len: total length of fpdu */ static int irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist, struct list_head *pbufl, struct irdma_puda_buf *buf, u16 fpdu_len) { int status = 0; struct irdma_puda_buf *nextbuf; u32 nextseqnum; u16 plen = fpdu_len - buf->datalen; bool done = false; nextseqnum = buf->seqnum + buf->datalen; do { nextbuf = irdma_puda_get_listbuf(rxlist); if (!nextbuf) { status = -ENOBUFS; break; } list_add_tail(&nextbuf->list, pbufl); if (nextbuf->seqnum != nextseqnum) { pfpdu->bad_seq_num++; status = -ERANGE; break; } if (nextbuf->datalen >= plen) { done = true; } else { plen -= nextbuf->datalen; nextseqnum = nextbuf->seqnum + nextbuf->datalen; } } while (!done); return status; } /** * irdma_ieq_handle_partial - process partial fpdu buffer * @ieq: ieq resource * @pfpdu: partial management per user qp * @buf: receive buffer * @fpdu_len: fpdu len in the buffer */ static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu, struct irdma_puda_buf *buf, u16 fpdu_len) { int status = 0; u8 *crcptr; u32 mpacrc; u32 seqnum = buf->seqnum; struct list_head pbufl; /* partial buffer list */ struct irdma_puda_buf *txbuf = NULL; struct list_head *rxlist = &pfpdu->rxlist; ieq->partials_handled++; INIT_LIST_HEAD(&pbufl); list_add(&buf->list, &pbufl); status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len); if (status) goto error; txbuf = irdma_puda_get_bufpool(ieq); if (!txbuf) { pfpdu->no_tx_bufs++; status = -ENOBUFS; goto error; } irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len); irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum); crcptr = txbuf->data + fpdu_len - 4; mpacrc = *(u32 *)crcptr; if (ieq->check_crc) { status = 
irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data, (fpdu_len - 4), mpacrc); if (status) { ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n"); goto error; } } print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET, 16, 8, txbuf->mem.va, txbuf->totallen, false); if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) txbuf->ah_id = pfpdu->ah->ah_info.ah_idx; txbuf->do_lpb = true; irdma_puda_send_buf(ieq, txbuf); pfpdu->rcv_nxt = seqnum + fpdu_len; return status; error: while (!list_empty(&pbufl)) { buf = list_last_entry(&pbufl, struct irdma_puda_buf, list); list_move(&buf->list, rxlist); } if (txbuf) irdma_puda_ret_bufpool(ieq, txbuf); return status; } /** * irdma_ieq_process_buf - process buffer rcvd for ieq * @ieq: ieq resource * @pfpdu: partial management per user qp * @buf: receive buffer */ static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu, struct irdma_puda_buf *buf) { u16 fpdu_len = 0; u16 datalen = buf->datalen; u8 *datap = buf->data; u8 *crcptr; u16 ioffset = 0; u32 mpacrc; u32 seqnum = buf->seqnum; u16 len = 0; u16 full = 0; bool partial = false; struct irdma_puda_buf *txbuf; struct list_head *rxlist = &pfpdu->rxlist; int ret = 0; ioffset = (u16)(buf->data - (u8 *)buf->mem.va); while (datalen) { fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum); if (!fpdu_len) { ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad fpdu len\n"); list_add(&buf->list, rxlist); return -EINVAL; } if (datalen < fpdu_len) { partial = true; break; } crcptr = datap + fpdu_len - 4; mpacrc = *(u32 *)crcptr; if (ieq->check_crc) ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap, fpdu_len - 4, mpacrc); if (ret) { list_add(&buf->list, rxlist); ibdev_dbg(to_ibdev(ieq->dev), "ERR: IRDMA_ERR_MPA_CRC\n"); return -EINVAL; } full++; pfpdu->fpdu_processed++; ieq->fpdu_processed++; datap += fpdu_len; len += fpdu_len; datalen -= fpdu_len; } if (full) { /* copy full pdu's in the txbuf and send them out */ txbuf = irdma_puda_get_bufpool(ieq); if (!txbuf) { pfpdu->no_tx_bufs++; list_add(&buf->list, rxlist); return -ENOBUFS; } /* modify txbuf's buffer header */ irdma_ieq_setup_tx_buf(buf, txbuf); /* copy full fpdu's to new buffer */ if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset, txbuf->hdrlen, len); txbuf->totallen = txbuf->hdrlen + len; txbuf->ah_id = pfpdu->ah->ah_info.ah_idx; } else { irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen, len); txbuf->totallen = buf->hdrlen + len; } irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum); print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET, 16, 8, txbuf->mem.va, txbuf->totallen, false); txbuf->do_lpb = true; irdma_puda_send_buf(ieq, txbuf); if (!datalen) { pfpdu->rcv_nxt = buf->seqnum + len; irdma_puda_ret_bufpool(ieq, buf); return 0; } buf->data = datap; buf->seqnum = seqnum + len; buf->datalen = datalen; pfpdu->rcv_nxt = buf->seqnum; } if (partial) return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len); return 0; } /** * irdma_ieq_process_fpdus - process fpdu's buffers on its list * @qp: qp for which partial fpdus * @ieq: ieq resource */ void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp, struct irdma_puda_rsrc *ieq) { struct irdma_pfpdu *pfpdu = &qp->pfpdu; struct list_head *rxlist = &pfpdu->rxlist; struct irdma_puda_buf *buf; int status; do { if (list_empty(rxlist)) break; buf = irdma_puda_get_listbuf(rxlist); if (!buf) { ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error no buf\n"); break; } if (buf->seqnum != pfpdu->rcv_nxt) { /* This could be out of 
order or missing packet */ pfpdu->out_of_order++; list_add(&buf->list, rxlist); break; } /* keep processing buffers from the head of the list */ status = irdma_ieq_process_buf(ieq, pfpdu, buf); if (status == -EINVAL) { pfpdu->mpa_crc_err = true; while (!list_empty(rxlist)) { buf = irdma_puda_get_listbuf(rxlist); irdma_puda_ret_bufpool(ieq, buf); pfpdu->crc_err++; ieq->crc_err++; } /* create CQP for AE */ irdma_ieq_mpa_crc_ae(ieq->dev, qp); } } while (!status); } /** * irdma_ieq_create_ah - create an address handle for IEQ * @qp: qp pointer * @buf: buf received on IEQ used to create AH */ static int irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf) { struct irdma_ah_info ah_info = {}; qp->pfpdu.ah_buf = buf; irdma_puda_ieq_get_ah_info(qp, &ah_info); return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false, IRDMA_PUDA_RSRC_TYPE_IEQ, qp, &qp->pfpdu.ah); } /** * irdma_ieq_handle_exception - handle qp's exception * @ieq: ieq resource * @qp: qp receiving excpetion * @buf: receive buffer */ static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp, struct irdma_puda_buf *buf) { struct irdma_pfpdu *pfpdu = &qp->pfpdu; u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx; u32 rcv_wnd = hw_host_ctx[23]; /* first partial seq # in q2 */ u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET); struct list_head *rxlist = &pfpdu->rxlist; unsigned long flags = 0; u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev; print_hex_dump_debug("IEQ: IEQ RX BUFFER", DUMP_PREFIX_OFFSET, 16, 8, buf->mem.va, buf->totallen, false); spin_lock_irqsave(&pfpdu->lock, flags); pfpdu->total_ieq_bufs++; if (pfpdu->mpa_crc_err) { pfpdu->crc_err++; goto error; } if (pfpdu->mode && fps != pfpdu->fps) { /* clean up qp as it is new partial sequence */ irdma_ieq_cleanup_qp(ieq, qp); ibdev_dbg(to_ibdev(ieq->dev), "IEQ: restarting new partial\n"); pfpdu->mode = false; } if (!pfpdu->mode) { print_hex_dump_debug("IEQ: Q2 BUFFER", DUMP_PREFIX_OFFSET, 16, 8, (u64 *)qp->q2_buf, 128, false); /* First_Partial_Sequence_Number check */ pfpdu->rcv_nxt = fps; pfpdu->fps = fps; pfpdu->mode = true; pfpdu->max_fpdu_data = (buf->ipv4) ? 
(ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) : (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6); pfpdu->pmode_count++; ieq->pmode_count++; INIT_LIST_HEAD(rxlist); irdma_ieq_check_first_buf(buf, fps); } if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) { pfpdu->bad_seq_num++; ieq->bad_seq_num++; goto error; } if (!list_empty(rxlist)) { if (buf->seqnum != pfpdu->nextseqnum) { irdma_send_ieq_ack(qp); /* throw away out-of-order, duplicates*/ goto error; } } /* Insert buf before head */ list_add_tail(&buf->list, rxlist); pfpdu->nextseqnum = buf->seqnum + buf->datalen; pfpdu->lastrcv_buf = buf; if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) { irdma_ieq_create_ah(qp, buf); if (!pfpdu->ah) goto error; goto exit; } if (hw_rev == IRDMA_GEN_1) irdma_ieq_process_fpdus(qp, ieq); else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid) irdma_ieq_process_fpdus(qp, ieq); exit: spin_unlock_irqrestore(&pfpdu->lock, flags); return; error: irdma_puda_ret_bufpool(ieq, buf); spin_unlock_irqrestore(&pfpdu->lock, flags); } /** * irdma_ieq_receive - received exception buffer * @vsi: VSI of device * @buf: exception buffer received */ static void irdma_ieq_receive(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf) { struct irdma_puda_rsrc *ieq = vsi->ieq; struct irdma_sc_qp *qp = NULL; u32 wqe_idx = ieq->compl_rxwqe_idx; qp = irdma_ieq_get_qp(vsi->dev, buf); if (!qp) { ieq->stats_bad_qp_id++; irdma_puda_ret_bufpool(ieq, buf); } else { irdma_ieq_handle_exception(ieq, qp, buf); } /* * ieq->rx_wqe_idx is used by irdma_puda_replenish_rq() * on which wqe_idx to start replenish rq */ if (!ieq->rxq_invalid_cnt) ieq->rx_wqe_idx = wqe_idx; ieq->rxq_invalid_cnt++; } /** * irdma_ieq_tx_compl - put back after sending completed exception buffer * @vsi: sc VSI struct * @sqwrid: pointer to puda buffer */ static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid) { struct irdma_puda_rsrc *ieq = vsi->ieq; struct irdma_puda_buf *buf = sqwrid; irdma_puda_ret_bufpool(ieq, buf); } /** * irdma_ieq_cleanup_qp - qp is being destroyed * @ieq: ieq resource * @qp: all pending fpdu buffers */ void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp) { struct irdma_puda_buf *buf; struct irdma_pfpdu *pfpdu = &qp->pfpdu; struct list_head *rxlist = &pfpdu->rxlist; if (qp->pfpdu.ah) { irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah); qp->pfpdu.ah = NULL; qp->pfpdu.ah_buf = NULL; } if (!pfpdu->mode) return; while (!list_empty(rxlist)) { buf = irdma_puda_get_listbuf(rxlist); irdma_puda_ret_bufpool(ieq, buf); } }
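/*
 * Receive-path overview for the exception queue (IEQ) code in this file:
 * irdma_ieq_receive() resolves the owning QP for a completed rcv buffer and
 * hands it to irdma_ieq_handle_exception(), which validates the TCP sequence
 * number against rcv_nxt/rcv_wnd, queues the buffer on pfpdu->rxlist and,
 * once an address handle is valid (GEN_2 and later), calls
 * irdma_ieq_process_fpdus(). That routine pulls in-order buffers off rxlist
 * and feeds them to irdma_ieq_process_buf(), which loops complete FPDUs back
 * through irdma_puda_send_buf() and defers FPDUs that span buffers to
 * irdma_ieq_handle_partial().
 */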
linux-master
drivers/infiniband/hw/irdma/puda.c
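The FPDU length math in irdma_ieq_get_fpdu_len() above can be checked in isolation: the 16-bit MPA length field is read from the payload, the MPA framing overhead is added, and the sum is rounded up to a 4-byte boundary before being compared against max_fpdu_data. The following user-space sketch mirrors only that rounding step; the framing constant and helper name are assumptions for illustration, not values taken from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed overhead for the sketch: 2-byte MPA length field + 4-byte CRC. */
#define EXAMPLE_MPA_FRAMING 6

/* Mirror of the "+ framing, round up to 4 bytes" step in the driver. */
static uint16_t example_fpdu_len(uint16_t mpa_len)
{
	uint16_t len = mpa_len + EXAMPLE_MPA_FRAMING;

	return (len + 3) & 0xfffc;
}

int main(void)
{
	/* 100-byte ULPDU -> 106 with framing -> 108 after 4-byte alignment. */
	printf("fpdu_len = %u\n", example_fpdu_len(100));
	return 0;
}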
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" /** * irdma_arp_table -manage arp table * @rf: RDMA PCI function * @ip_addr: ip address for device * @ipv4: IPv4 flag * @mac_addr: mac address ptr * @action: modify, delete or add */ int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4, const u8 *mac_addr, u32 action) { unsigned long flags; int arp_index; u32 ip[4] = {}; if (ipv4) ip[0] = *ip_addr; else memcpy(ip, ip_addr, sizeof(ip)); spin_lock_irqsave(&rf->arp_lock, flags); for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) { if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) break; } switch (action) { case IRDMA_ARP_ADD: if (arp_index != rf->arp_table_size) { arp_index = -1; break; } arp_index = 0; if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size, (u32 *)&arp_index, &rf->next_arp_index)) { arp_index = -1; break; } memcpy(rf->arp_table[arp_index].ip_addr, ip, sizeof(rf->arp_table[arp_index].ip_addr)); ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr); break; case IRDMA_ARP_RESOLVE: if (arp_index == rf->arp_table_size) arp_index = -1; break; case IRDMA_ARP_DELETE: if (arp_index == rf->arp_table_size) { arp_index = -1; break; } memset(rf->arp_table[arp_index].ip_addr, 0, sizeof(rf->arp_table[arp_index].ip_addr)); eth_zero_addr(rf->arp_table[arp_index].mac_addr); irdma_free_rsrc(rf, rf->allocated_arps, arp_index); break; default: arp_index = -1; break; } spin_unlock_irqrestore(&rf->arp_lock, flags); return arp_index; } /** * irdma_add_arp - add a new arp entry if needed * @rf: RDMA function * @ip: IP address * @ipv4: IPv4 flag * @mac: MAC address */ int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac) { int arpidx; arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE); if (arpidx >= 0) { if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac)) return arpidx; irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip, ipv4, IRDMA_ARP_DELETE); } irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD); return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE); } /** * wr32 - write 32 bits to hw register * @hw: hardware information including registers * @reg: register offset * @val: value to write to register */ inline void wr32(struct irdma_hw *hw, u32 reg, u32 val) { writel(val, hw->hw_addr + reg); } /** * rd32 - read a 32 bit hw register * @hw: hardware information including registers * @reg: register offset * * Return value of register content */ inline u32 rd32(struct irdma_hw *hw, u32 reg) { return readl(hw->hw_addr + reg); } /** * rd64 - read a 64 bit hw register * @hw: hardware information including registers * @reg: register offset * * Return value of register content */ inline u64 rd64(struct irdma_hw *hw, u32 reg) { return readq(hw->hw_addr + reg); } static void irdma_gid_change_event(struct ib_device *ibdev) { struct ib_event ib_event; ib_event.event = IB_EVENT_GID_CHANGE; ib_event.device = ibdev; ib_event.element.port_num = 1; ib_dispatch_event(&ib_event); } /** * irdma_inetaddr_event - system notifier for ipv4 addr events * @notifier: not used * @event: event for notifier * @ptr: if address */ int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct in_ifaddr *ifa = ptr; struct net_device *real_dev, *netdev = ifa->ifa_dev->dev; struct irdma_device *iwdev; struct ib_device *ibdev; u32 local_ipaddr; real_dev = rdma_vlan_dev_real_dev(netdev); if (!real_dev) 
real_dev = netdev; ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); if (!ibdev) return NOTIFY_DONE; iwdev = to_iwdev(ibdev); local_ipaddr = ntohl(ifa->ifa_address); ibdev_dbg(&iwdev->ibdev, "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev, event, &local_ipaddr, real_dev->dev_addr); switch (event) { case NETDEV_DOWN: irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr, &local_ipaddr, true, IRDMA_ARP_DELETE); irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false); irdma_gid_change_event(&iwdev->ibdev); break; case NETDEV_UP: case NETDEV_CHANGEADDR: irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr); irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true); irdma_gid_change_event(&iwdev->ibdev); break; default: break; } ib_device_put(ibdev); return NOTIFY_DONE; } /** * irdma_inet6addr_event - system notifier for ipv6 addr events * @notifier: not used * @event: event for notifier * @ptr: if address */ int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = ptr; struct net_device *real_dev, *netdev = ifa->idev->dev; struct irdma_device *iwdev; struct ib_device *ibdev; u32 local_ipaddr6[4]; real_dev = rdma_vlan_dev_real_dev(netdev); if (!real_dev) real_dev = netdev; ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); if (!ibdev) return NOTIFY_DONE; iwdev = to_iwdev(ibdev); irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32); ibdev_dbg(&iwdev->ibdev, "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev, event, local_ipaddr6, real_dev->dev_addr); switch (event) { case NETDEV_DOWN: irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr, local_ipaddr6, false, IRDMA_ARP_DELETE); irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false); irdma_gid_change_event(&iwdev->ibdev); break; case NETDEV_UP: case NETDEV_CHANGEADDR: irdma_add_arp(iwdev->rf, local_ipaddr6, false, real_dev->dev_addr); irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true); irdma_gid_change_event(&iwdev->ibdev); break; default: break; } ib_device_put(ibdev); return NOTIFY_DONE; } /** * irdma_net_event - system notifier for net events * @notifier: not used * @event: event for notifier * @ptr: neighbor */ int irdma_net_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct neighbour *neigh = ptr; struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev; struct irdma_device *iwdev; struct ib_device *ibdev; __be32 *p; u32 local_ipaddr[4] = {}; bool ipv4 = true; switch (event) { case NETEVENT_NEIGH_UPDATE: real_dev = rdma_vlan_dev_real_dev(netdev); if (!real_dev) real_dev = netdev; ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); if (!ibdev) return NOTIFY_DONE; iwdev = to_iwdev(ibdev); p = (__be32 *)neigh->primary_key; if (neigh->tbl->family == AF_INET6) { ipv4 = false; irdma_copy_ip_ntohl(local_ipaddr, p); } else { local_ipaddr[0] = ntohl(*p); } ibdev_dbg(&iwdev->ibdev, "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n", iwdev->netdev, neigh->nud_state, local_ipaddr, neigh->ha); if (neigh->nud_state & NUD_VALID) irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha); else irdma_manage_arp_cache(iwdev->rf, neigh->ha, local_ipaddr, ipv4, IRDMA_ARP_DELETE); ib_device_put(ibdev); break; default: break; } return NOTIFY_DONE; } /** * irdma_netdevice_event - system notifier for netdev events * @notifier: not used * @event: event for notifier * @ptr: netdev */ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event, void 
*ptr) { struct irdma_device *iwdev; struct ib_device *ibdev; struct net_device *netdev = netdev_notifier_info_to_dev(ptr); ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA); if (!ibdev) return NOTIFY_DONE; iwdev = to_iwdev(ibdev); iwdev->iw_status = 1; switch (event) { case NETDEV_DOWN: iwdev->iw_status = 0; fallthrough; case NETDEV_UP: irdma_port_ibevent(iwdev); break; default: break; } ib_device_put(ibdev); return NOTIFY_DONE; } /** * irdma_add_ipv6_addr - add ipv6 address to the hw arp table * @iwdev: irdma device */ static void irdma_add_ipv6_addr(struct irdma_device *iwdev) { struct net_device *ip_dev; struct inet6_dev *idev; struct inet6_ifaddr *ifp, *tmp; u32 local_ipaddr6[4]; rcu_read_lock(); for_each_netdev_rcu (&init_net, ip_dev) { if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF && rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) || ip_dev == iwdev->netdev) && (READ_ONCE(ip_dev->flags) & IFF_UP)) { idev = __in6_dev_get(ip_dev); if (!idev) { ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n"); break; } list_for_each_entry_safe (ifp, tmp, &idev->addr_list, if_list) { ibdev_dbg(&iwdev->ibdev, "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr, rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr); irdma_copy_ip_ntohl(local_ipaddr6, ifp->addr.in6_u.u6_addr32); irdma_manage_arp_cache(iwdev->rf, ip_dev->dev_addr, local_ipaddr6, false, IRDMA_ARP_ADD); } } } rcu_read_unlock(); } /** * irdma_add_ipv4_addr - add ipv4 address to the hw arp table * @iwdev: irdma device */ static void irdma_add_ipv4_addr(struct irdma_device *iwdev) { struct net_device *dev; struct in_device *idev; u32 ip_addr; rcu_read_lock(); for_each_netdev_rcu (&init_net, dev) { if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF && rdma_vlan_dev_real_dev(dev) == iwdev->netdev) || dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) { const struct in_ifaddr *ifa; idev = __in_dev_get_rcu(dev); if (!idev) continue; in_dev_for_each_ifa_rcu(ifa, idev) { ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev), dev->dev_addr); ip_addr = ntohl(ifa->ifa_address); irdma_manage_arp_cache(iwdev->rf, dev->dev_addr, &ip_addr, true, IRDMA_ARP_ADD); } } } rcu_read_unlock(); } /** * irdma_add_ip - add ip addresses * @iwdev: irdma device * * Add ipv4/ipv6 addresses to the arp cache */ void irdma_add_ip(struct irdma_device *iwdev) { irdma_add_ipv4_addr(iwdev); irdma_add_ipv6_addr(iwdev); } /** * irdma_alloc_and_get_cqp_request - get cqp struct * @cqp: device cqp ptr * @wait: cqp to be used in wait mode */ struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp, bool wait) { struct irdma_cqp_request *cqp_request = NULL; unsigned long flags; spin_lock_irqsave(&cqp->req_lock, flags); if (!list_empty(&cqp->cqp_avail_reqs)) { cqp_request = list_first_entry(&cqp->cqp_avail_reqs, struct irdma_cqp_request, list); list_del_init(&cqp_request->list); } spin_unlock_irqrestore(&cqp->req_lock, flags); if (!cqp_request) { cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC); if (cqp_request) { cqp_request->dynamic = true; if (wait) init_waitqueue_head(&cqp_request->waitq); } } if (!cqp_request) { ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory"); return NULL; } cqp_request->waiting = wait; refcount_set(&cqp_request->refcnt, 1); memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info)); return cqp_request; } /** * irdma_get_cqp_request - increase refcount for cqp_request * @cqp_request: pointer to cqp_request instance */ static inline void 
irdma_get_cqp_request(struct irdma_cqp_request *cqp_request) { refcount_inc(&cqp_request->refcnt); } /** * irdma_free_cqp_request - free cqp request * @cqp: cqp ptr * @cqp_request: to be put back in cqp list */ void irdma_free_cqp_request(struct irdma_cqp *cqp, struct irdma_cqp_request *cqp_request) { unsigned long flags; if (cqp_request->dynamic) { kfree(cqp_request); } else { WRITE_ONCE(cqp_request->request_done, false); cqp_request->callback_fcn = NULL; cqp_request->waiting = false; spin_lock_irqsave(&cqp->req_lock, flags); list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); spin_unlock_irqrestore(&cqp->req_lock, flags); } wake_up(&cqp->remove_wq); } /** * irdma_put_cqp_request - dec ref count and free if 0 * @cqp: cqp ptr * @cqp_request: to be put back in cqp list */ void irdma_put_cqp_request(struct irdma_cqp *cqp, struct irdma_cqp_request *cqp_request) { if (refcount_dec_and_test(&cqp_request->refcnt)) irdma_free_cqp_request(cqp, cqp_request); } /** * irdma_free_pending_cqp_request -free pending cqp request objs * @cqp: cqp ptr * @cqp_request: to be put back in cqp list */ static void irdma_free_pending_cqp_request(struct irdma_cqp *cqp, struct irdma_cqp_request *cqp_request) { if (cqp_request->waiting) { cqp_request->compl_info.error = true; WRITE_ONCE(cqp_request->request_done, true); wake_up(&cqp_request->waitq); } wait_event_timeout(cqp->remove_wq, refcount_read(&cqp_request->refcnt) == 1, 1000); irdma_put_cqp_request(cqp, cqp_request); } /** * irdma_cleanup_pending_cqp_op - clean-up cqp with no * completions * @rf: RDMA PCI function */ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_cqp *cqp = &rf->cqp; struct irdma_cqp_request *cqp_request = NULL; struct cqp_cmds_info *pcmdinfo = NULL; u32 i, pending_work, wqe_idx; pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring); wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring); for (i = 0; i < pending_work; i++) { cqp_request = (struct irdma_cqp_request *)(unsigned long) cqp->scratch_array[wqe_idx]; if (cqp_request) irdma_free_pending_cqp_request(cqp, cqp_request); wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring); } while (!list_empty(&dev->cqp_cmd_head)) { pcmdinfo = irdma_remove_cqp_head(dev); cqp_request = container_of(pcmdinfo, struct irdma_cqp_request, info); if (cqp_request) irdma_free_pending_cqp_request(cqp, cqp_request); } } /** * irdma_wait_event - wait for completion * @rf: RDMA PCI function * @cqp_request: cqp request to wait */ static int irdma_wait_event(struct irdma_pci_f *rf, struct irdma_cqp_request *cqp_request) { struct irdma_cqp_timeout cqp_timeout = {}; bool cqp_error = false; int err_code = 0; cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops); do { irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); if (wait_event_timeout(cqp_request->waitq, READ_ONCE(cqp_request->request_done), msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS))) break; irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev); if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD) continue; if (!rf->reset) { rf->reset = true; rf->gen_ops.request_reset(rf); } return -ETIMEDOUT; } while (1); cqp_error = cqp_request->compl_info.error; if (cqp_error) { err_code = -EIO; if (cqp_request->compl_info.maj_err_code == 0xFFFF) { if (cqp_request->compl_info.min_err_code == 0x8002) err_code = -EBUSY; else if (cqp_request->compl_info.min_err_code == 0x8029) { if (!rf->reset) { rf->reset = true; rf->gen_ops.request_reset(rf); } } } } return err_code; } static const char *const 
irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = { [IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd", [IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd", [IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd", [IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd", [IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd", [IRDMA_OP_AEQ_CREATE] = "AEQ Destroy Cmd", [IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd", [IRDMA_OP_QP_MODIFY] = "Modify QP Cmd", [IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd", [IRDMA_OP_CQ_CREATE] = "Create CQ Cmd", [IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd", [IRDMA_OP_QP_CREATE] = "Create QP Cmd", [IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd", [IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd", [IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd", [IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd", [IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd", [IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd", [IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd", [IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd", [IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd", [IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd", [IRDMA_OP_SUSPEND] = "Suspend QP Cmd", [IRDMA_OP_RESUME] = "Resume QP Cmd", [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd", [IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd", [IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd", [IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd", [IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd", [IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd", [IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd", [IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd", [IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd", [IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd", [IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd", [IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd", [IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd", [IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd", [IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd", [IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd", [IRDMA_OP_GEN_AE] = "Generate AE Cmd", [IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd", [IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd", [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd", [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd", [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd", }; static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = { {0xffff, 0x8002, "Invalid State"}, {0xffff, 0x8006, "Flush No Wqe Pending"}, {0xffff, 0x8007, "Modify QP Bad Close"}, {0xffff, 0x8009, "LLP Closed"}, {0xffff, 0x800a, "Reset Not Sent"} }; /** * irdma_cqp_crit_err - check if CQP error is critical * @dev: pointer to dev structure * @cqp_cmd: code for last CQP operation * @maj_err_code: major error code * @min_err_code: minot error code */ bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd, u16 maj_err_code, u16 min_err_code) { int i; for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) { if (maj_err_code == irdma_noncrit_err_list[i].maj && min_err_code == irdma_noncrit_err_list[i].min) { ibdev_dbg(to_ibdev(dev), "CQP: [%s Error][%s] maj=0x%x min=0x%x\n", irdma_noncrit_err_list[i].desc, irdma_cqp_cmd_names[cqp_cmd], maj_err_code, min_err_code); return false; } } return true; } /** * irdma_handle_cqp_op - process cqp command * @rf: RDMA PCI function * @cqp_request: cqp request to process */ int irdma_handle_cqp_op(struct irdma_pci_f *rf, struct irdma_cqp_request 
*cqp_request) { struct irdma_sc_dev *dev = &rf->sc_dev; struct cqp_cmds_info *info = &cqp_request->info; int status; bool put_cqp_request = true; if (rf->reset) return -EBUSY; irdma_get_cqp_request(cqp_request); status = irdma_process_cqp_cmd(dev, info); if (status) goto err; if (cqp_request->waiting) { put_cqp_request = false; status = irdma_wait_event(rf, cqp_request); if (status) goto err; } return 0; err: if (irdma_cqp_crit_err(dev, info->cqp_cmd, cqp_request->compl_info.maj_err_code, cqp_request->compl_info.min_err_code)) ibdev_err(&rf->iwdev->ibdev, "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n", irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting, cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code, cqp_request->compl_info.min_err_code); if (put_cqp_request) irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } void irdma_qp_add_ref(struct ib_qp *ibqp) { struct irdma_qp *iwqp = (struct irdma_qp *)ibqp; refcount_inc(&iwqp->refcnt); } void irdma_qp_rem_ref(struct ib_qp *ibqp) { struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_device *iwdev = iwqp->iwdev; u32 qp_num; unsigned long flags; spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); if (!refcount_dec_and_test(&iwqp->refcnt)) { spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); return; } qp_num = iwqp->ibqp.qp_num; iwdev->rf->qp_table[qp_num] = NULL; spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); complete(&iwqp->free_qp); } void irdma_cq_add_ref(struct ib_cq *ibcq) { struct irdma_cq *iwcq = to_iwcq(ibcq); refcount_inc(&iwcq->refcnt); } void irdma_cq_rem_ref(struct ib_cq *ibcq) { struct ib_device *ibdev = ibcq->device; struct irdma_device *iwdev = to_iwdev(ibdev); struct irdma_cq *iwcq = to_iwcq(ibcq); unsigned long flags; spin_lock_irqsave(&iwdev->rf->cqtable_lock, flags); if (!refcount_dec_and_test(&iwcq->refcnt)) { spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags); return; } iwdev->rf->cq_table[iwcq->cq_num] = NULL; spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags); complete(&iwcq->free_cq); } struct ib_device *to_ibdev(struct irdma_sc_dev *dev) { return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev; } /** * irdma_get_qp - get qp address * @device: iwarp device * @qpn: qp number */ struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn) { struct irdma_device *iwdev = to_iwdev(device); if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp) return NULL; return &iwdev->rf->qp_table[qpn]->ibqp; } /** * irdma_remove_cqp_head - return head entry and remove * @dev: device */ void *irdma_remove_cqp_head(struct irdma_sc_dev *dev) { struct list_head *entry; struct list_head *list = &dev->cqp_cmd_head; if (list_empty(list)) return NULL; entry = list->next; list_del(entry); return entry; } /** * irdma_cqp_sds_cmd - create cqp command for sd * @dev: hardware control device structure * @sdinfo: information for sd cqp * */ int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, struct irdma_update_sds_info *sdinfo) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo, sizeof(cqp_info->in.u.update_pe_sds.info)); cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS; cqp_info->post_sq = 1; cqp_info->in.u.update_pe_sds.dev = dev; cqp_info->in.u.update_pe_sds.scratch = 
(uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume * @qp: hardware control qp * @op: suspend or resume */ int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op) { struct irdma_sc_dev *dev = qp->dev; struct irdma_cqp_request *cqp_request; struct irdma_sc_cqp *cqp = dev->cqp; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = op; cqp_info->in.u.suspend_resume.cqp = cqp; cqp_info->in.u.suspend_resume.qp = qp; cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_term_modify_qp - modify qp for term message * @qp: hardware control qp * @next_state: qp's next state * @term: terminate code * @term_len: length */ void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term, u8 term_len) { struct irdma_qp *iwqp; iwqp = qp->qp_uk.back_qp; irdma_next_iw_state(iwqp, next_state, 0, term, term_len); }; /** * irdma_terminate_done - after terminate is completed * @qp: hardware control qp * @timeout_occurred: indicates if terminate timer expired */ void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred) { struct irdma_qp *iwqp; u8 hte = 0; bool first_time; unsigned long flags; iwqp = qp->qp_uk.back_qp; spin_lock_irqsave(&iwqp->lock, flags); if (iwqp->hte_added) { iwqp->hte_added = 0; hte = 1; } first_time = !(qp->term_flags & IRDMA_TERM_DONE); qp->term_flags |= IRDMA_TERM_DONE; spin_unlock_irqrestore(&iwqp->lock, flags); if (first_time) { if (!timeout_occurred) irdma_terminate_del_timer(qp); irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0); irdma_cm_disconn(iwqp); } } static void irdma_terminate_timeout(struct timer_list *t) { struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer); struct irdma_sc_qp *qp = &iwqp->sc_qp; irdma_terminate_done(qp, 1); irdma_qp_rem_ref(&iwqp->ibqp); } /** * irdma_terminate_start_timer - start terminate timeout * @qp: hardware control qp */ void irdma_terminate_start_timer(struct irdma_sc_qp *qp) { struct irdma_qp *iwqp; iwqp = qp->qp_uk.back_qp; irdma_qp_add_ref(&iwqp->ibqp); timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0); iwqp->terminate_timer.expires = jiffies + HZ; add_timer(&iwqp->terminate_timer); } /** * irdma_terminate_del_timer - delete terminate timeout * @qp: hardware control qp */ void irdma_terminate_del_timer(struct irdma_sc_qp *qp) { struct irdma_qp *iwqp; int ret; iwqp = qp->qp_uk.back_qp; ret = del_timer(&iwqp->terminate_timer); if (ret) irdma_qp_rem_ref(&iwqp->ibqp); } /** * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm * @dev: function device struct * @val_mem: buffer for fpm * @hmc_fn_id: function id for fpm */ int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, struct irdma_dma_mem *val_mem, u8 hmc_fn_id) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; cqp_request->param = NULL; cqp_info->in.u.query_fpm_val.cqp = dev->cqp; cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa; cqp_info->in.u.query_fpm_val.fpm_val_va 
= val_mem->va; cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id; cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL; cqp_info->post_sq = 1; cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw * @dev: hardware control device structure * @val_mem: buffer with fpm values * @hmc_fn_id: function id for fpm */ int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, struct irdma_dma_mem *val_mem, u8 hmc_fn_id) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; cqp_request->param = NULL; cqp_info->in.u.commit_fpm_val.cqp = dev->cqp; cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa; cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va; cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id; cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL; cqp_info->post_sq = 1; cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_cqp_cq_create_cmd - create a cq for the cqp * @dev: device pointer * @cq: pointer to created cq */ int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; cqp_info->post_sq = 1; cqp_info->in.u.cq_create.cq = cq; cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(iwcqp, cqp_request); return status; } /** * irdma_cqp_qp_create_cmd - create a qp for the cqp * @dev: device pointer * @qp: pointer to created qp */ int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_create_qp_info *qp_info; int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; qp_info = &cqp_request->info.in.u.qp_create.info; memset(qp_info, 0, sizeof(*qp_info)); qp_info->cq_num_valid = true; qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS; cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE; cqp_info->post_sq = 1; cqp_info->in.u.qp_create.qp = qp; cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(iwcqp, cqp_request); return status; } /** * irdma_dealloc_push_page - free a push page for qp * @rf: RDMA PCI function * @qp: hardware control qp */ static void irdma_dealloc_push_page(struct irdma_pci_f *rf, struct irdma_sc_qp *qp) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) return; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); if (!cqp_request) return; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE; cqp_info->post_sq = 1; 
cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx; cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle; cqp_info->in.u.manage_push_page.info.free_page = 1; cqp_info->in.u.manage_push_page.info.push_page_type = 0; cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp; cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); if (!status) qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX; irdma_put_cqp_request(&rf->cqp, cqp_request); } /** * irdma_free_qp_rsrc - free up memory resources for qp * @iwqp: qp ptr (user or kernel) */ void irdma_free_qp_rsrc(struct irdma_qp *iwqp) { struct irdma_device *iwdev = iwqp->iwdev; struct irdma_pci_f *rf = iwdev->rf; u32 qp_num = iwqp->ibqp.qp_num; irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp); irdma_dealloc_push_page(rf, &iwqp->sc_qp); if (iwqp->sc_qp.vsi) { irdma_qp_rem_qos(&iwqp->sc_qp); iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi, iwqp->sc_qp.user_pri); } if (qp_num > 2) irdma_free_rsrc(rf, rf->allocated_qps, qp_num); dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size, iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa); iwqp->q2_ctx_mem.va = NULL; dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size, iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa); iwqp->kqp.dma_mem.va = NULL; kfree(iwqp->kqp.sq_wrid_mem); kfree(iwqp->kqp.rq_wrid_mem); } /** * irdma_cq_wq_destroy - send cq destroy cqp * @rf: RDMA PCI function * @cq: hardware control cq */ void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) return; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY; cqp_info->post_sq = 1; cqp_info->in.u.cq_destroy.cq = cq; cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request; irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); } /** * irdma_hw_modify_qp_callback - handle state for modifyQPs that don't wait * @cqp_request: modify QP completion */ static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request) { struct cqp_cmds_info *cqp_info; struct irdma_qp *iwqp; cqp_info = &cqp_request->info; iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp; atomic_dec(&iwqp->hw_mod_qp_pend); wake_up(&iwqp->mod_qp_waitq); } /** * irdma_hw_modify_qp - setup cqp for modify qp * @iwdev: RDMA device * @iwqp: qp ptr (user or kernel) * @info: info for modify qp * @wait: flag to wait or not for modify qp completion */ int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, struct irdma_modify_qp_info *info, bool wait) { int status; struct irdma_pci_f *rf = iwdev->rf; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_modify_qp_info *m_info; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); if (!cqp_request) return -ENOMEM; if (!wait) { cqp_request->callback_fcn = irdma_hw_modify_qp_callback; atomic_inc(&iwqp->hw_mod_qp_pend); } cqp_info = &cqp_request->info; m_info = &cqp_info->in.u.qp_modify.info; memcpy(m_info, info, sizeof(*m_info)); cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY; cqp_info->post_sq = 1; cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); if (status) { if (rdma_protocol_roce(&iwdev->ibdev, 1)) return status; switch (m_info->next_iwarp_state) { 
struct irdma_gen_ae_info ae_info; case IRDMA_QP_STATE_RTS: case IRDMA_QP_STATE_IDLE: case IRDMA_QP_STATE_TERMINATE: case IRDMA_QP_STATE_CLOSING: if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE) irdma_send_reset(iwqp->cm_node); else iwqp->sc_qp.term_flags = IRDMA_TERM_DONE; if (!wait) { ae_info.ae_code = IRDMA_AE_BAD_CLOSE; ae_info.ae_src = 0; irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false); } else { cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; m_info = &cqp_info->in.u.qp_modify.info; memcpy(m_info, info, sizeof(*m_info)); cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY; cqp_info->post_sq = 1; cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR; m_info->reset_tcp_conn = true; irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); } break; case IRDMA_QP_STATE_ERROR: default: break; } } return status; } /** * irdma_cqp_cq_destroy_cmd - destroy the cqp cq * @dev: device pointer * @cq: pointer to cq */ void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq) { struct irdma_pci_f *rf = dev_to_rf(dev); irdma_cq_wq_destroy(rf, cq); } /** * irdma_cqp_qp_destroy_cmd - destroy the cqp * @dev: device pointer * @qp: pointer to qp */ int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; memset(cqp_info, 0, sizeof(*cqp_info)); cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY; cqp_info->post_sq = 1; cqp_info->in.u.qp_destroy.qp = qp; cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; cqp_info->in.u.qp_destroy.remove_hash_idx = true; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_ieq_mpa_crc_ae - generate AE for crc error * @dev: hardware control device structure * @qp: hardware control qp */ void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) { struct irdma_gen_ae_info info = {}; struct irdma_pci_f *rf = dev_to_rf(dev); ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n"); info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR; info.ae_src = IRDMA_AE_SOURCE_RQ; irdma_gen_ae(rf, qp, &info, false); } /** * irdma_init_hash_desc - initialize hash for crc calculation * @desc: cryption type */ int irdma_init_hash_desc(struct shash_desc **desc) { struct crypto_shash *tfm; struct shash_desc *tdesc; tfm = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(tfm)) return -EINVAL; tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm), GFP_KERNEL); if (!tdesc) { crypto_free_shash(tfm); return -EINVAL; } tdesc->tfm = tfm; *desc = tdesc; return 0; } /** * irdma_free_hash_desc - free hash desc * @desc: to be freed */ void irdma_free_hash_desc(struct shash_desc *desc) { if (desc) { crypto_free_shash(desc->tfm); kfree(desc); } } /** * irdma_ieq_check_mpacrc - check if mpa crc is OK * @desc: desc for hash * @addr: address of buffer for crc * @len: length of buffer * @val: value to be compared */ int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, u32 val) { u32 crc = 0; int ret; int ret_code = 0; crypto_shash_init(desc); ret = crypto_shash_update(desc, addr, len); if (!ret) 
crypto_shash_final(desc, (u8 *)&crc); if (crc != val) ret_code = -EINVAL; return ret_code; } /** * irdma_ieq_get_qp - get qp based on quad in puda buffer * @dev: hardware control device structure * @buf: receive puda buffer on exception q */ struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev, struct irdma_puda_buf *buf) { struct irdma_qp *iwqp; struct irdma_cm_node *cm_node; struct irdma_device *iwdev = buf->vsi->back_vsi; u32 loc_addr[4] = {}; u32 rem_addr[4] = {}; u16 loc_port, rem_port; struct ipv6hdr *ip6h; struct iphdr *iph = (struct iphdr *)buf->iph; struct tcphdr *tcph = (struct tcphdr *)buf->tcph; if (iph->version == 4) { loc_addr[0] = ntohl(iph->daddr); rem_addr[0] = ntohl(iph->saddr); } else { ip6h = (struct ipv6hdr *)buf->iph; irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32); irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32); } loc_port = ntohs(tcph->dest); rem_port = ntohs(tcph->source); cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port, loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF); if (!cm_node) return NULL; iwqp = cm_node->iwqp; irdma_rem_ref_cm_node(cm_node); return &iwqp->sc_qp; } /** * irdma_send_ieq_ack - ACKs for duplicate or OOO partials FPDUs * @qp: qp ptr */ void irdma_send_ieq_ack(struct irdma_sc_qp *qp) { struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node; struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf; struct tcphdr *tcph = (struct tcphdr *)buf->tcph; cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum; cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); irdma_send_ack(cm_node); } /** * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer * @qp: qp pointer * @ah_info: AH info pointer */ void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp, struct irdma_ah_info *ah_info) { struct irdma_puda_buf *buf = qp->pfpdu.ah_buf; struct iphdr *iph; struct ipv6hdr *ip6h; memset(ah_info, 0, sizeof(*ah_info)); ah_info->do_lpbk = true; ah_info->vlan_tag = buf->vlan_id; ah_info->insert_vlan_tag = buf->vlan_valid; ah_info->ipv4_valid = buf->ipv4; ah_info->vsi = qp->vsi; if (buf->smac_valid) ether_addr_copy(ah_info->mac_addr, buf->smac); if (buf->ipv4) { ah_info->ipv4_valid = true; iph = (struct iphdr *)buf->iph; ah_info->hop_ttl = iph->ttl; ah_info->tc_tos = iph->tos; ah_info->dest_ip_addr[0] = ntohl(iph->daddr); ah_info->src_ip_addr[0] = ntohl(iph->saddr); } else { ip6h = (struct ipv6hdr *)buf->iph; ah_info->hop_ttl = ip6h->hop_limit; ah_info->tc_tos = ip6h->priority; irdma_copy_ip_ntohl(ah_info->dest_ip_addr, ip6h->daddr.in6_u.u6_addr32); irdma_copy_ip_ntohl(ah_info->src_ip_addr, ip6h->saddr.in6_u.u6_addr32); } ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev), ah_info->dest_ip_addr, ah_info->ipv4_valid, NULL, IRDMA_ARP_RESOLVE); } /** * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer * @buf: puda to update * @len: length of buffer * @seqnum: seq number for tcp */ static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum) { struct tcphdr *tcph; struct iphdr *iph; u16 iphlen; u16 pktsize; u8 *addr = buf->mem.va; iphlen = (buf->ipv4) ? 
20 : 40; iph = (struct iphdr *)(addr + buf->maclen); tcph = (struct tcphdr *)(addr + buf->maclen + iphlen); pktsize = len + buf->tcphlen + iphlen; iph->tot_len = htons(pktsize); tcph->seq = htonl(seqnum); } /** * irdma_ieq_update_tcpip_info - update tcpip in the buffer * @buf: puda to update * @len: length of buffer * @seqnum: seq number for tcp */ void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum) { struct tcphdr *tcph; u8 *addr; if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum); addr = buf->mem.va; tcph = (struct tcphdr *)addr; tcph->seq = htonl(seqnum); } /** * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda * buffer * @info: to get information * @buf: puda buffer */ static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, struct irdma_puda_buf *buf) { struct iphdr *iph; struct ipv6hdr *ip6h; struct tcphdr *tcph; u16 iphlen; u16 pkt_len; u8 *mem = buf->mem.va; struct ethhdr *ethh = buf->mem.va; if (ethh->h_proto == htons(0x8100)) { info->vlan_valid = true; buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK; } buf->maclen = (info->vlan_valid) ? 18 : 14; iphlen = (info->l3proto) ? 40 : 20; buf->ipv4 = (info->l3proto) ? false : true; buf->iph = mem + buf->maclen; iph = (struct iphdr *)buf->iph; buf->tcph = buf->iph + iphlen; tcph = (struct tcphdr *)buf->tcph; if (buf->ipv4) { pkt_len = ntohs(iph->tot_len); } else { ip6h = (struct ipv6hdr *)buf->iph; pkt_len = ntohs(ip6h->payload_len) + iphlen; } buf->totallen = pkt_len + buf->maclen; if (info->payload_len < buf->totallen) { ibdev_dbg(to_ibdev(buf->vsi->dev), "ERR: payload_len = 0x%x totallen expected0x%x\n", info->payload_len, buf->totallen); return -EINVAL; } buf->tcphlen = tcph->doff << 2; buf->datalen = pkt_len - iphlen - buf->tcphlen; buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL; buf->hdrlen = buf->maclen + iphlen + buf->tcphlen; buf->seqnum = ntohl(tcph->seq); return 0; } /** * irdma_puda_get_tcpip_info - get tcpip info from puda buffer * @info: to get information * @buf: puda buffer */ int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, struct irdma_puda_buf *buf) { struct tcphdr *tcph; u32 pkt_len; u8 *mem; if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) return irdma_gen1_puda_get_tcpip_info(info, buf); mem = buf->mem.va; buf->vlan_valid = info->vlan_valid; if (info->vlan_valid) buf->vlan_id = info->vlan; buf->ipv4 = info->ipv4; if (buf->ipv4) buf->iph = mem + IRDMA_IPV4_PAD; else buf->iph = mem; buf->tcph = mem + IRDMA_TCP_OFFSET; tcph = (struct tcphdr *)buf->tcph; pkt_len = info->payload_len; buf->totallen = pkt_len; buf->tcphlen = tcph->doff << 2; buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen; buf->data = buf->datalen ? 
buf->tcph + buf->tcphlen : NULL; buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen; buf->seqnum = ntohl(tcph->seq); if (info->smac_valid) { ether_addr_copy(buf->smac, info->smac); buf->smac_valid = true; } return 0; } /** * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats * @t: timer_list pointer */ static void irdma_hw_stats_timeout(struct timer_list *t) { struct irdma_vsi_pestat *pf_devstat = from_timer(pf_devstat, t, stats_timer); struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi; if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false); else irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat); mod_timer(&pf_devstat->stats_timer, jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); } /** * irdma_hw_stats_start_timer - Start periodic stats timer * @vsi: vsi structure pointer */ void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi) { struct irdma_vsi_pestat *devstat = vsi->pestat; timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0); mod_timer(&devstat->stats_timer, jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); } /** * irdma_hw_stats_stop_timer - Delete periodic stats timer * @vsi: pointer to vsi structure */ void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi) { struct irdma_vsi_pestat *devstat = vsi->pestat; del_timer_sync(&devstat->stats_timer); } /** * irdma_process_stats - Checking for wrap and update stats * @pestat: stats structure pointer */ static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat) { sc_vsi_update_stats(pestat->vsi); } /** * irdma_cqp_gather_stats_gen1 - Gather stats * @dev: pointer to device structure * @pestat: statistics structure */ void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev, struct irdma_vsi_pestat *pestat) { struct irdma_gather_stats *gather_stats = pestat->gather_info.gather_stats_va; const struct irdma_hw_stat_map *map = dev->hw_stats_map; u16 max_stats_idx = dev->hw_attrs.max_stat_idx; u32 stats_inst_offset_32; u32 stats_inst_offset_64; u64 new_val; u16 i; stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ? 
pestat->gather_info.stats_inst_index : pestat->hw->hmc.hmc_fn_id; stats_inst_offset_32 *= 4; stats_inst_offset_64 = stats_inst_offset_32 * 2; for (i = 0; i < max_stats_idx; i++) { if (map[i].bitmask <= IRDMA_MAX_STATS_32) new_val = rd32(dev->hw, dev->hw_stats_regs[i] + stats_inst_offset_32); else new_val = rd64(dev->hw, dev->hw_stats_regs[i] + stats_inst_offset_64); gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val; } irdma_process_stats(pestat); } /** * irdma_process_cqp_stats - Checking for wrap and update stats * @cqp_request: cqp_request structure pointer */ static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request) { struct irdma_vsi_pestat *pestat = cqp_request->param; irdma_process_stats(pestat); } /** * irdma_cqp_gather_stats_cmd - Gather stats * @dev: pointer to device structure * @pestat: pointer to stats info * @wait: flag to wait or not wait for stats */ int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, struct irdma_vsi_pestat *pestat, bool wait) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; memset(cqp_info, 0, sizeof(*cqp_info)); cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER; cqp_info->post_sq = 1; cqp_info->in.u.stats_gather.info = pestat->gather_info; cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request; cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp; cqp_request->param = pestat; if (!wait) cqp_request->callback_fcn = irdma_process_cqp_stats; status = irdma_handle_cqp_op(rf, cqp_request); if (wait) irdma_process_stats(pestat); irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_cqp_stats_inst_cmd - Allocate/free stats instance * @vsi: pointer to vsi structure * @cmd: command to allocate or free * @stats_info: pointer to allocate stats info */ int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd, struct irdma_stats_inst_info *stats_info) { struct irdma_pci_f *rf = dev_to_rf(vsi->dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; bool wait = false; if (cmd == IRDMA_OP_STATS_ALLOCATE) wait = true; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; memset(cqp_info, 0, sizeof(*cqp_info)); cqp_info->cqp_cmd = cmd; cqp_info->post_sq = 1; cqp_info->in.u.stats_manage.info = *stats_info; cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request; cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp; status = irdma_handle_cqp_op(rf, cqp_request); if (wait) stats_info->stats_idx = cqp_request->compl_info.op_ret_val; irdma_put_cqp_request(iwcqp, cqp_request); return status; } /** * irdma_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0 * @dev: pointer to device info * @sc_ceq: pointer to ceq structure * @op: Create or Destroy */ int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq, u8 op) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->post_sq = 1; cqp_info->cqp_cmd = op; cqp_info->in.u.ceq_create.ceq = sc_ceq; cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, 
cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_cqp_aeq_cmd - Create/Destroy AEQ * @dev: pointer to device info * @sc_aeq: pointer to aeq structure * @op: Create or Destroy */ int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq, u8 op) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->post_sq = 1; cqp_info->cqp_cmd = op; cqp_info->in.u.aeq_create.aeq = sc_aeq; cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_cqp_ws_node_cmd - Add/modify/delete ws node * @dev: pointer to device structure * @cmd: Add, modify or delete * @node_info: pointer to ws node info */ int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, struct irdma_ws_node_info *node_info) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; bool poll; if (!rf->sc_dev.ceq_valid) poll = true; else poll = false; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; memset(cqp_info, 0, sizeof(*cqp_info)); cqp_info->cqp_cmd = cmd; cqp_info->post_sq = 1; cqp_info->in.u.ws_node.info = *node_info; cqp_info->in.u.ws_node.cqp = cqp; cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); if (status) goto exit; if (poll) { struct irdma_ccq_cqe_info compl_info; status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE, &compl_info); node_info->qs_handle = compl_info.op_ret_val; ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n", compl_info.op_code, compl_info.op_ret_val); } else { node_info->qs_handle = cqp_request->compl_info.op_ret_val; } exit: irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } /** * irdma_ah_cqp_op - perform an AH cqp operation * @rf: RDMA PCI function * @sc_ah: address handle * @cmd: AH operation * @wait: wait if true * @callback_fcn: Callback function on CQP op completion * @cb_param: parameter for callback function * * returns errno */ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd, bool wait, void (*callback_fcn)(struct irdma_cqp_request *), void *cb_param) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY) return -EINVAL; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = cmd; cqp_info->post_sq = 1; if (cmd == IRDMA_OP_AH_CREATE) { cqp_info->in.u.ah_create.info = sc_ah->ah_info; cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request; cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp; } else if (cmd == IRDMA_OP_AH_DESTROY) { cqp_info->in.u.ah_destroy.info = sc_ah->ah_info; cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request; cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp; } if (!wait) { cqp_request->callback_fcn = callback_fcn; cqp_request->param = cb_param; } status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); if (status) return 
-ENOMEM; if (wait) sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE); return 0; } /** * irdma_ieq_ah_cb - callback after creation of AH for IEQ * @cqp_request: pointer to cqp_request of create AH */ static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request) { struct irdma_sc_qp *qp = cqp_request->param; struct irdma_sc_ah *sc_ah = qp->pfpdu.ah; unsigned long flags; spin_lock_irqsave(&qp->pfpdu.lock, flags); if (!cqp_request->compl_info.op_ret_val) { sc_ah->ah_info.ah_valid = true; irdma_ieq_process_fpdus(qp, qp->vsi->ieq); } else { sc_ah->ah_info.ah_valid = false; irdma_ieq_cleanup_qp(qp->vsi->ieq, qp); } spin_unlock_irqrestore(&qp->pfpdu.lock, flags); } /** * irdma_ilq_ah_cb - callback after creation of AH for ILQ * @cqp_request: pointer to cqp_request of create AH */ static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request) { struct irdma_cm_node *cm_node = cqp_request->param; struct irdma_sc_ah *sc_ah = cm_node->ah; sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val; irdma_add_conn_est_qh(cm_node); } /** * irdma_puda_create_ah - create AH for ILQ/IEQ qp's * @dev: device pointer * @ah_info: Address handle info * @wait: When true will wait for operation to complete * @type: ILQ/IEQ * @cb_param: Callback param when not waiting * @ah_ret: Returned pointer to address handle if created * */ int irdma_puda_create_ah(struct irdma_sc_dev *dev, struct irdma_ah_info *ah_info, bool wait, enum puda_rsrc_type type, void *cb_param, struct irdma_sc_ah **ah_ret) { struct irdma_sc_ah *ah; struct irdma_pci_f *rf = dev_to_rf(dev); int err; ah = kzalloc(sizeof(*ah), GFP_ATOMIC); *ah_ret = ah; if (!ah) return -ENOMEM; err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_info->ah_idx, &rf->next_ah); if (err) goto err_free; ah->dev = dev; ah->ah_info = *ah_info; if (type == IRDMA_PUDA_RSRC_TYPE_ILQ) err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait, irdma_ilq_ah_cb, cb_param); else err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait, irdma_ieq_ah_cb, cb_param); if (err) goto error; return 0; error: irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx); err_free: kfree(ah); *ah_ret = NULL; return -ENOMEM; } /** * irdma_puda_free_ah - free a puda address handle * @dev: device pointer * @ah: The address handle to free */ void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah) { struct irdma_pci_f *rf = dev_to_rf(dev); if (!ah) return; if (ah->ah_info.ah_valid) { irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL); irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx); } kfree(ah); } /** * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/ID QP * @cqp_request: pointer to cqp_request of create AH */ void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request) { struct irdma_sc_ah *sc_ah = cqp_request->param; if (!cqp_request->compl_info.op_ret_val) sc_ah->ah_info.ah_valid = true; else sc_ah->ah_info.ah_valid = false; } /** * irdma_prm_add_pble_mem - add moemory to pble resources * @pprm: pble resource manager * @pchunk: chunk of memory to add */ int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, struct irdma_chunk *pchunk) { u64 sizeofbitmap; if (pchunk->size & 0xfff) return -EINVAL; sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift; pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL); if (!pchunk->bitmapbuf) return -ENOMEM; pchunk->sizeofbitmap = sizeofbitmap; /* each pble is 8 bytes hence shift by 3 */ pprm->total_pble_alloc += pchunk->size >> 3; pprm->free_pble_cnt += pchunk->size >> 3; 
return 0; } /** * irdma_prm_get_pbles - get pble's from prm * @pprm: pble resource manager * @chunkinfo: nformation about chunk where pble's were acquired * @mem_size: size of pble memory needed * @vaddr: returns virtual address of pble memory * @fpm_addr: returns fpm address of pble memory */ int irdma_prm_get_pbles(struct irdma_pble_prm *pprm, struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size, u64 **vaddr, u64 *fpm_addr) { u64 bits_needed; u64 bit_idx = PBLE_INVALID_IDX; struct irdma_chunk *pchunk = NULL; struct list_head *chunk_entry = pprm->clist.next; u32 offset; unsigned long flags; *vaddr = NULL; *fpm_addr = 0; bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift)); spin_lock_irqsave(&pprm->prm_lock, flags); while (chunk_entry != &pprm->clist) { pchunk = (struct irdma_chunk *)chunk_entry; bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf, pchunk->sizeofbitmap, 0, bits_needed, 0); if (bit_idx < pchunk->sizeofbitmap) break; /* list.next used macro */ chunk_entry = pchunk->list.next; } if (!pchunk || bit_idx >= pchunk->sizeofbitmap) { spin_unlock_irqrestore(&pprm->prm_lock, flags); return -ENOMEM; } bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed); offset = bit_idx << pprm->pble_shift; *vaddr = pchunk->vaddr + offset; *fpm_addr = pchunk->fpm_addr + offset; chunkinfo->pchunk = pchunk; chunkinfo->bit_idx = bit_idx; chunkinfo->bits_used = bits_needed; /* 3 is sizeof pble divide */ pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3); spin_unlock_irqrestore(&pprm->prm_lock, flags); return 0; } /** * irdma_prm_return_pbles - return pbles back to prm * @pprm: pble resource manager * @chunkinfo: chunk where pble's were acquired and to be freed */ void irdma_prm_return_pbles(struct irdma_pble_prm *pprm, struct irdma_pble_chunkinfo *chunkinfo) { unsigned long flags; spin_lock_irqsave(&pprm->prm_lock, flags); pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3); bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx, chunkinfo->bits_used); spin_unlock_irqrestore(&pprm->prm_lock, flags); } int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma, u32 pg_cnt) { struct page *vm_page; int i; u8 *addr; addr = (u8 *)(uintptr_t)va; for (i = 0; i < pg_cnt; i++) { vm_page = vmalloc_to_page(addr); if (!vm_page) goto err; pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(hw->device, pg_dma[i])) goto err; addr += PAGE_SIZE; } return 0; err: irdma_unmap_vm_page_list(hw, pg_dma, i); return -ENOMEM; } void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt) { int i; for (i = 0; i < pg_cnt; i++) dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL); } /** * irdma_pble_free_paged_mem - free virtual paged memory * @chunk: chunk to free with paged memory */ void irdma_pble_free_paged_mem(struct irdma_chunk *chunk) { if (!chunk->pg_cnt) goto done; irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs, chunk->pg_cnt); done: kfree(chunk->dmainfo.dmaaddrs); chunk->dmainfo.dmaaddrs = NULL; vfree(chunk->vaddr); chunk->vaddr = NULL; chunk->type = 0; } /** * irdma_pble_get_paged_mem -allocate paged memory for pbles * @chunk: chunk to add for paged memory * @pg_cnt: number of pages needed */ int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt) { u32 size; void *va; chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL); if (!chunk->dmainfo.dmaaddrs) return -ENOMEM; size = PAGE_SIZE * pg_cnt; va = vmalloc(size); if (!va) 
goto err; if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs, pg_cnt)) { vfree(va); goto err; } chunk->vaddr = va; chunk->size = size; chunk->pg_cnt = pg_cnt; chunk->type = PBLE_SD_PAGED; return 0; err: kfree(chunk->dmainfo.dmaaddrs); chunk->dmainfo.dmaaddrs = NULL; return -ENOMEM; } /** * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID * @dev: device pointer */ u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev) { struct irdma_pci_f *rf = dev_to_rf(dev); u32 next = 1; u32 node_id; if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id, &node_id, &next)) return IRDMA_WS_NODE_INVALID; return (u16)node_id; } /** * irdma_free_ws_node_id - Free a tx scheduler node ID * @dev: device pointer * @node_id: Work scheduler node ID */ void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id) { struct irdma_pci_f *rf = dev_to_rf(dev); irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id); } /** * irdma_modify_qp_to_err - Modify a QP to error * @sc_qp: qp structure */ void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp) { struct irdma_qp *qp = sc_qp->qp_uk.back_qp; struct ib_qp_attr attr; if (qp->iwdev->rf->reset) return; attr.qp_state = IB_QPS_ERR; if (rdma_protocol_roce(qp->ibqp.device, 1)) irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL); else irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL); } void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event) { struct ib_event ibevent; if (!iwqp->ibqp.event_handler) return; switch (event) { case IRDMA_QP_EVENT_CATASTROPHIC: ibevent.event = IB_EVENT_QP_FATAL; break; case IRDMA_QP_EVENT_ACCESS_ERR: ibevent.event = IB_EVENT_QP_ACCESS_ERR; break; case IRDMA_QP_EVENT_REQ_ERR: ibevent.event = IB_EVENT_QP_REQ_ERR; break; } ibevent.device = iwqp->ibqp.device; ibevent.element.qp = &iwqp->ibqp; iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context); } bool irdma_cq_empty(struct irdma_cq *iwcq) { struct irdma_cq_uk *ukcq; u64 qword3; __le64 *cqe; u8 polarity; ukcq = &iwcq->sc_cq.cq_uk; cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq); get_64bit_val(cqe, 24, &qword3); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); return polarity != ukcq->polarity; } void irdma_remove_cmpls_list(struct irdma_cq *iwcq) { struct irdma_cmpl_gen *cmpl_node; struct list_head *tmp_node, *list_node; list_for_each_safe (list_node, tmp_node, &iwcq->cmpl_generated) { cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list); list_del(&cmpl_node->list); kfree(cmpl_node); } } int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info) { struct irdma_cmpl_gen *cmpl; if (list_empty(&iwcq->cmpl_generated)) return -ENOENT; cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list); list_del(&cmpl->list); memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info)); kfree(cmpl); ibdev_dbg(iwcq->ibcq.device, "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n", __func__, cq_poll_info->qp_id, cq_poll_info->op_type, cq_poll_info->wr_id); return 0; } /** * irdma_set_cpi_common_values - fill in values for polling info struct * @cpi: resulting structure of cq_poll_info type * @qp: QPair * @qp_num: id of the QP */ static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi, struct irdma_qp_uk *qp, u32 qp_num) { cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED; cpi->error = true; cpi->major_err = IRDMA_FLUSH_MAJOR_ERR; cpi->minor_err = FLUSH_GENERAL_ERR; cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp; cpi->qp_id = qp_num; } static 
inline void irdma_comp_handler(struct irdma_cq *cq) { if (!cq->ibcq.comp_handler) return; if (atomic_cmpxchg(&cq->armed, 1, 0)) cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } void irdma_generate_flush_completions(struct irdma_qp *iwqp) { struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk; struct irdma_ring *sq_ring = &qp->sq_ring; struct irdma_ring *rq_ring = &qp->rq_ring; struct irdma_cmpl_gen *cmpl; __le64 *sw_wqe; u64 wqe_qword; u32 wqe_idx; bool compl_generated = false; unsigned long flags1; spin_lock_irqsave(&iwqp->iwscq->lock, flags1); if (irdma_cq_empty(iwqp->iwscq)) { unsigned long flags2; spin_lock_irqsave(&iwqp->lock, flags2); while (IRDMA_RING_MORE_WORK(*sq_ring)) { cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC); if (!cmpl) { spin_unlock_irqrestore(&iwqp->lock, flags2); spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); return; } wqe_idx = sq_ring->tail; irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; sw_wqe = qp->sq_base[wqe_idx].elem; get_64bit_val(sw_wqe, 24, &wqe_qword); cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, IRDMAQPSQ_OPCODE); cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ; /* remove the SQ WR by moving SQ tail*/ IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta); if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) { kfree(cmpl); continue; } ibdev_dbg(iwqp->iwscq->ibcq.device, "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n", __func__, cmpl->cpi.wr_id, qp->qp_id); list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated); compl_generated = true; } spin_unlock_irqrestore(&iwqp->lock, flags2); spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); if (compl_generated) irdma_comp_handler(iwqp->iwscq); } else { spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); } spin_lock_irqsave(&iwqp->iwrcq->lock, flags1); if (irdma_cq_empty(iwqp->iwrcq)) { unsigned long flags2; spin_lock_irqsave(&iwqp->lock, flags2); while (IRDMA_RING_MORE_WORK(*rq_ring)) { cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC); if (!cmpl) { spin_unlock_irqrestore(&iwqp->lock, flags2); spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); return; } wqe_idx = rq_ring->tail; irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx]; cmpl->cpi.op_type = IRDMA_OP_TYPE_REC; cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ; /* remove the RQ WR by moving RQ tail */ IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1); ibdev_dbg(iwqp->iwrcq->ibcq.device, "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n", __func__, cmpl->cpi.wr_id, qp->qp_id, wqe_idx); list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated); compl_generated = true; } spin_unlock_irqrestore(&iwqp->lock, flags2); spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); if (compl_generated) irdma_comp_handler(iwqp->iwrcq); } else { spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); } }
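/*
 * Standalone userspace sketch (not kernel code) of the PBLE chunk bitmap
 * accounting performed by irdma_prm_get_pbles()/irdma_prm_return_pbles()
 * above: each set bit stands for one allocation unit of (1 << pble_shift)
 * bytes, a request is rounded up to whole units, and the byte offset of a
 * grant is bit_idx << pble_shift.  Names and sizes below are invented for
 * illustration; the kernel uses bitmap_find_next_zero_area()/bitmap_set()
 * on a per-chunk bitmap under prm_lock instead of this toy array.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_BITS   64        /* pretend the chunk holds 64 units */
#define SKETCH_SHIFT  3         /* 8-byte PBLEs, as in the driver comment */

static unsigned char used[SKETCH_BITS];  /* one byte per bit keeps it simple */

/* find 'cnt' consecutive free units, mark them used, return first index or -1 */
static int sketch_get(unsigned int cnt)
{
	unsigned int i, run = 0;

	for (i = 0; i < SKETCH_BITS; i++) {
		run = used[i] ? 0 : run + 1;
		if (run == cnt) {
			unsigned int start = i + 1 - cnt;

			memset(&used[start], 1, cnt);
			return (int)start;
		}
	}
	return -1;
}

static void sketch_put(unsigned int start, unsigned int cnt)
{
	memset(&used[start], 0, cnt);
}

int main(void)
{
	uint64_t mem_size = 100;                        /* bytes requested */
	unsigned int unit = 1u << SKETCH_SHIFT;
	unsigned int bits_needed = (mem_size + unit - 1) / unit;
	int bit_idx = sketch_get(bits_needed);

	if (bit_idx < 0) {
		puts("chunk exhausted");
		return 1;
	}
	printf("granted %u units at byte offset %u\n",
	       bits_needed, (unsigned int)bit_idx << SKETCH_SHIFT);
	sketch_put((unsigned int)bit_idx, bits_needed);
	return 0;
}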
linux-master
drivers/infiniband/hw/irdma/utils.c
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" #include "../../../net/ethernet/intel/ice/ice.h" MODULE_ALIAS("i40iw"); MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA"); MODULE_LICENSE("Dual BSD/GPL"); static struct notifier_block irdma_inetaddr_notifier = { .notifier_call = irdma_inetaddr_event }; static struct notifier_block irdma_inetaddr6_notifier = { .notifier_call = irdma_inet6addr_event }; static struct notifier_block irdma_net_notifier = { .notifier_call = irdma_net_event }; static struct notifier_block irdma_netdevice_notifier = { .notifier_call = irdma_netdevice_event }; static void irdma_register_notifiers(void) { register_inetaddr_notifier(&irdma_inetaddr_notifier); register_inet6addr_notifier(&irdma_inetaddr6_notifier); register_netevent_notifier(&irdma_net_notifier); register_netdevice_notifier(&irdma_netdevice_notifier); } static void irdma_unregister_notifiers(void) { unregister_netevent_notifier(&irdma_net_notifier); unregister_inetaddr_notifier(&irdma_inetaddr_notifier); unregister_inet6addr_notifier(&irdma_inetaddr6_notifier); unregister_netdevice_notifier(&irdma_netdevice_notifier); } static void irdma_prep_tc_change(struct irdma_device *iwdev) { iwdev->vsi.tc_change_pending = true; irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND); /* Wait for all qp's to suspend */ wait_event_timeout(iwdev->suspend_wq, !atomic_read(&iwdev->vsi.qp_suspend_reqs), IRDMA_EVENT_TIMEOUT); irdma_ws_reset(&iwdev->vsi); } static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev) { if (mtu < IRDMA_MIN_MTU_IPV4) ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu); else if (mtu < IRDMA_MIN_MTU_IPV6) ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. 
Minimum MTU is 1280 for IPv6\\n", mtu); } static void irdma_fill_qos_info(struct irdma_l2params *l2params, struct iidc_qos_params *qos_info) { int i; l2params->num_tc = qos_info->num_tc; l2params->vsi_prio_type = qos_info->vport_priority_type; l2params->vsi_rel_bw = qos_info->vport_relative_bw; for (i = 0; i < l2params->num_tc; i++) { l2params->tc_info[i].egress_virt_up = qos_info->tc_info[i].egress_virt_up; l2params->tc_info[i].ingress_virt_up = qos_info->tc_info[i].ingress_virt_up; l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type; l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw; l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx; } for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) l2params->up2tc[i] = qos_info->up2tc[i]; if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) { l2params->dscp_mode = true; memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map)); } } static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event) { struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev); struct irdma_l2params l2params = {}; if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) { ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu); if (iwdev->vsi.mtu != iwdev->netdev->mtu) { l2params.mtu = iwdev->netdev->mtu; l2params.mtu_changed = true; irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev); irdma_change_l2params(&iwdev->vsi, &l2params); } } else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) { if (iwdev->vsi.tc_change_pending) return; irdma_prep_tc_change(iwdev); } else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) { struct iidc_qos_params qos_info = {}; if (!iwdev->vsi.tc_change_pending) return; l2params.tc_changed = true; ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n"); ice_get_qos_params(pf, &qos_info); irdma_fill_qos_info(&l2params, &qos_info); if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY) iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode; irdma_change_l2params(&iwdev->vsi, &l2params); } else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) { ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n", event->reg); if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) { u32 pe_criterr; pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]); #define IRDMA_Q1_RESOURCE_ERR 0x0001024d if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) { ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n", pe_criterr); iwdev->rf->reset = true; } else { ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n"); } } if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) { ibdev_err(&iwdev->ibdev, "HMC Error\n"); iwdev->rf->reset = true; } if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) { ibdev_err(&iwdev->ibdev, "PE Push Error\n"); iwdev->rf->reset = true; } if (iwdev->rf->reset) iwdev->rf->gen_ops.request_reset(iwdev->rf); } } /** * irdma_request_reset - Request a reset * @rf: RDMA PCI function */ static void irdma_request_reset(struct irdma_pci_f *rf) { struct ice_pf *pf = rf->cdev; ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n"); ice_rdma_request_reset(pf, IIDC_PFR); } /** * irdma_lan_register_qset - Register qset with LAN driver * @vsi: vsi structure * @tc_node: Traffic class node */ static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node) { struct irdma_device *iwdev = vsi->back_vsi; struct ice_pf *pf = iwdev->rf->cdev; struct iidc_rdma_qset_params qset = {}; int ret; qset.qs_handle = tc_node->qs_handle; qset.tc = tc_node->traffic_class; qset.vport_id = 
vsi->vsi_idx; ret = ice_add_rdma_qset(pf, &qset); if (ret) { ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n"); return ret; } tc_node->l2_sched_node_id = qset.teid; vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid; return 0; } /** * irdma_lan_unregister_qset - Unregister qset with LAN driver * @vsi: vsi structure * @tc_node: Traffic class node */ static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node) { struct irdma_device *iwdev = vsi->back_vsi; struct ice_pf *pf = iwdev->rf->cdev; struct iidc_rdma_qset_params qset = {}; qset.qs_handle = tc_node->qs_handle; qset.tc = tc_node->traffic_class; qset.vport_id = vsi->vsi_idx; qset.teid = tc_node->l2_sched_node_id; if (ice_del_rdma_qset(pf, &qset)) ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n"); } static void irdma_remove(struct auxiliary_device *aux_dev) { struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev, struct iidc_auxiliary_dev, adev); struct ice_pf *pf = iidc_adev->pf; struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev); irdma_ib_unregister_device(iwdev); ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false); pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn)); } static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf, struct ice_vsi *vsi) { struct irdma_pci_f *rf = iwdev->rf; rf->cdev = pf; rf->gen_ops.register_qset = irdma_lan_register_qset; rf->gen_ops.unregister_qset = irdma_lan_unregister_qset; rf->hw.hw_addr = pf->hw.hw_addr; rf->pcidev = pf->pdev; rf->msix_count = pf->num_rdma_msix; rf->pf_id = pf->hw.pf_id; rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector]; rf->default_vsi.vsi_idx = vsi->vsi_num; rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? 
IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY; rf->rdma_ver = IRDMA_GEN_2; rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT; rf->rst_to = IRDMA_RST_TIMEOUT_HZ; rf->gen_ops.request_reset = irdma_request_reset; rf->limits_sel = 7; rf->iwdev = iwdev; mutex_init(&iwdev->ah_tbl_lock); iwdev->netdev = vsi->netdev; iwdev->vsi_num = vsi->vsi_num; iwdev->init_state = INITIAL_STATE; iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT; iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT; iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED; iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE; if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY) iwdev->roce_mode = true; } static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id) { struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev, struct iidc_auxiliary_dev, adev); struct ice_pf *pf = iidc_adev->pf; struct ice_vsi *vsi = ice_get_main_vsi(pf); struct iidc_qos_params qos_info = {}; struct irdma_device *iwdev; struct irdma_pci_f *rf; struct irdma_l2params l2params = {}; int err; if (!vsi) return -EIO; iwdev = ib_alloc_device(irdma_device, ibdev); if (!iwdev) return -ENOMEM; iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL); if (!iwdev->rf) { ib_dealloc_device(&iwdev->ibdev); return -ENOMEM; } irdma_fill_device_info(iwdev, pf, vsi); rf = iwdev->rf; err = irdma_ctrl_init_hw(rf); if (err) goto err_ctrl_init; l2params.mtu = iwdev->netdev->mtu; ice_get_qos_params(pf, &qos_info); irdma_fill_qos_info(&l2params, &qos_info); if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY) iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode; err = irdma_rt_init_hw(iwdev, &l2params); if (err) goto err_rt_init; err = irdma_ib_register_device(iwdev); if (err) goto err_ibreg; ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true); ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn)); auxiliary_set_drvdata(aux_dev, iwdev); return 0; err_ibreg: irdma_rt_deinit_hw(iwdev); err_rt_init: irdma_ctrl_deinit_hw(rf); err_ctrl_init: kfree(iwdev->rf); ib_dealloc_device(&iwdev->ibdev); return err; } static const struct auxiliary_device_id irdma_auxiliary_id_table[] = { {.name = "ice.iwarp", }, {.name = "ice.roce", }, {}, }; MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table); static struct iidc_auxiliary_drv irdma_auxiliary_drv = { .adrv = { .id_table = irdma_auxiliary_id_table, .probe = irdma_probe, .remove = irdma_remove, }, .event_handler = irdma_iidc_event_handler, }; static int __init irdma_init_module(void) { int ret; ret = auxiliary_driver_register(&i40iw_auxiliary_drv); if (ret) { pr_err("Failed i40iw(gen_1) auxiliary_driver_register() ret=%d\n", ret); return ret; } ret = auxiliary_driver_register(&irdma_auxiliary_drv.adrv); if (ret) { auxiliary_driver_unregister(&i40iw_auxiliary_drv); pr_err("Failed irdma auxiliary_driver_register() ret=%d\n", ret); return ret; } irdma_register_notifiers(); return 0; } static void __exit irdma_exit_module(void) { irdma_unregister_notifiers(); auxiliary_driver_unregister(&irdma_auxiliary_drv.adrv); auxiliary_driver_unregister(&i40iw_auxiliary_drv); } module_init(irdma_init_module); module_exit(irdma_exit_module);
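/*
 * Toy userspace illustration (not kernel code) of the bitmask event
 * dispatch pattern used by irdma_iidc_event_handler() above: the LAN
 * driver reports events as bits in a mask and the RDMA handler tests
 * one bit at a time.  The enum names and handler bodies here are
 * placeholders, not the real IIDC definitions.
 */
#include <stdio.h>

enum sketch_event {
	SKETCH_EVENT_MTU_CHANGE,
	SKETCH_EVENT_BEFORE_TC_CHANGE,
	SKETCH_EVENT_AFTER_TC_CHANGE,
	SKETCH_EVENT_CRIT_ERR,
};

#define SKETCH_BIT(x) (1ul << (x))

static void sketch_handle(unsigned long type)
{
	if (type & SKETCH_BIT(SKETCH_EVENT_MTU_CHANGE))
		puts("re-read netdev MTU and push new l2params");
	else if (type & SKETCH_BIT(SKETCH_EVENT_BEFORE_TC_CHANGE))
		puts("suspend QPs and reset the WS tree");
	else if (type & SKETCH_BIT(SKETCH_EVENT_AFTER_TC_CHANGE))
		puts("re-query QoS params and rebuild l2params");
	else if (type & SKETCH_BIT(SKETCH_EVENT_CRIT_ERR))
		puts("log OICR cause and request a PF reset if fatal");
}

int main(void)
{
	sketch_handle(SKETCH_BIT(SKETCH_EVENT_MTU_CHANGE));
	sketch_handle(SKETCH_BIT(SKETCH_EVENT_CRIT_ERR));
	return 0;
}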
linux-master
drivers/infiniband/hw/irdma/main.c
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2017 - 2021 Intel Corporation */ #include "osdep.h" #include "hmc.h" #include "defs.h" #include "type.h" #include "protos.h" #include "ws.h" /** * irdma_alloc_node - Allocate a WS node and init * @vsi: vsi pointer * @user_pri: user priority * @node_type: Type of node, leaf or parent * @parent: parent node pointer */ static struct irdma_ws_node *irdma_alloc_node(struct irdma_sc_vsi *vsi, u8 user_pri, enum irdma_ws_node_type node_type, struct irdma_ws_node *parent) { struct irdma_virt_mem ws_mem; struct irdma_ws_node *node; u16 node_index = 0; ws_mem.size = sizeof(struct irdma_ws_node); ws_mem.va = kzalloc(ws_mem.size, GFP_KERNEL); if (!ws_mem.va) return NULL; if (parent) { node_index = irdma_alloc_ws_node_id(vsi->dev); if (node_index == IRDMA_WS_NODE_INVALID) { kfree(ws_mem.va); return NULL; } } node = ws_mem.va; node->index = node_index; node->vsi_index = vsi->vsi_idx; INIT_LIST_HEAD(&node->child_list_head); if (node_type == WS_NODE_TYPE_LEAF) { node->type_leaf = true; node->traffic_class = vsi->qos[user_pri].traffic_class; node->user_pri = user_pri; node->rel_bw = vsi->qos[user_pri].rel_bw; if (!node->rel_bw) node->rel_bw = 1; node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle; node->prio_type = IRDMA_PRIO_WEIGHTED_RR; } else { node->rel_bw = 1; node->prio_type = IRDMA_PRIO_WEIGHTED_RR; node->enable = true; } node->parent = parent; return node; } /** * irdma_free_node - Free a WS node * @vsi: VSI stricture of device * @node: Pointer to node to free */ static void irdma_free_node(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node) { struct irdma_virt_mem ws_mem; if (node->index) irdma_free_ws_node_id(vsi->dev, node->index); ws_mem.va = node; ws_mem.size = sizeof(struct irdma_ws_node); kfree(ws_mem.va); } /** * irdma_ws_cqp_cmd - Post CQP work scheduler node cmd * @vsi: vsi pointer * @node: pointer to node * @cmd: add, remove or modify */ static int irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd) { struct irdma_ws_node_info node_info = {}; node_info.id = node->index; node_info.vsi = node->vsi_index; if (node->parent) node_info.parent_id = node->parent->index; else node_info.parent_id = node_info.id; node_info.weight = node->rel_bw; node_info.tc = node->traffic_class; node_info.prio_type = node->prio_type; node_info.type_leaf = node->type_leaf; node_info.enable = node->enable; if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) { ibdev_dbg(to_ibdev(vsi->dev), "WS: CQP WS CMD failed\n"); return -ENOMEM; } if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) { node->qs_handle = node_info.qs_handle; vsi->qos[node->user_pri].qs_handle = node_info.qs_handle; } return 0; } /** * ws_find_node - Find SC WS node based on VSI id or TC * @parent: parent node of First VSI or TC node * @match_val: value to match * @type: match type VSI/TC */ static struct irdma_ws_node *ws_find_node(struct irdma_ws_node *parent, u16 match_val, enum irdma_ws_match_type type) { struct irdma_ws_node *node; switch (type) { case WS_MATCH_TYPE_VSI: list_for_each_entry(node, &parent->child_list_head, siblings) { if (node->vsi_index == match_val) return node; } break; case WS_MATCH_TYPE_TC: list_for_each_entry(node, &parent->child_list_head, siblings) { if (node->traffic_class == match_val) return node; } break; default: break; } return NULL; } /** * irdma_tc_in_use - Checks to see if a leaf node is in use * @vsi: vsi pointer * @user_pri: user priority */ static bool irdma_tc_in_use(struct irdma_sc_vsi *vsi, u8 user_pri) { 
int i; mutex_lock(&vsi->qos[user_pri].qos_mutex); if (!list_empty(&vsi->qos[user_pri].qplist)) { mutex_unlock(&vsi->qos[user_pri].qos_mutex); return true; } /* Check if the traffic class associated with the given user priority * is in use by any other user priority. If so, nothing left to do */ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { if (vsi->qos[i].traffic_class == vsi->qos[user_pri].traffic_class && !list_empty(&vsi->qos[i].qplist)) { mutex_unlock(&vsi->qos[user_pri].qos_mutex); return true; } } mutex_unlock(&vsi->qos[user_pri].qos_mutex); return false; } /** * irdma_remove_leaf - Remove leaf node unconditionally * @vsi: vsi pointer * @user_pri: user priority */ static void irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri) { struct irdma_ws_node *ws_tree_root, *vsi_node, *tc_node; int i; u16 traffic_class; traffic_class = vsi->qos[user_pri].traffic_class; for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) if (vsi->qos[i].traffic_class == traffic_class) vsi->qos[i].valid = false; ws_tree_root = vsi->dev->ws_tree_root; if (!ws_tree_root) return; vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx, WS_MATCH_TYPE_VSI); if (!vsi_node) return; tc_node = ws_find_node(vsi_node, vsi->qos[user_pri].traffic_class, WS_MATCH_TYPE_TC); if (!tc_node) return; irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE); vsi->unregister_qset(vsi, tc_node); list_del(&tc_node->siblings); irdma_free_node(vsi, tc_node); /* Check if VSI node can be freed */ if (list_empty(&vsi_node->child_list_head)) { irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE); list_del(&vsi_node->siblings); irdma_free_node(vsi, vsi_node); /* Free head node there are no remaining VSI nodes */ if (list_empty(&ws_tree_root->child_list_head)) { irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_DELETE_NODE); irdma_free_node(vsi, ws_tree_root); vsi->dev->ws_tree_root = NULL; } } } /** * irdma_ws_add - Build work scheduler tree, set RDMA qs_handle * @vsi: vsi pointer * @user_pri: user priority */ int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri) { struct irdma_ws_node *ws_tree_root; struct irdma_ws_node *vsi_node; struct irdma_ws_node *tc_node; u16 traffic_class; int ret = 0; int i; mutex_lock(&vsi->dev->ws_mutex); if (vsi->tc_change_pending) { ret = -EBUSY; goto exit; } if (vsi->qos[user_pri].valid) goto exit; ws_tree_root = vsi->dev->ws_tree_root; if (!ws_tree_root) { ibdev_dbg(to_ibdev(vsi->dev), "WS: Creating root node\n"); ws_tree_root = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT, NULL); if (!ws_tree_root) { ret = -ENOMEM; goto exit; } ret = irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_ADD_NODE); if (ret) { irdma_free_node(vsi, ws_tree_root); goto exit; } vsi->dev->ws_tree_root = ws_tree_root; } /* Find a second tier node that matches the VSI */ vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx, WS_MATCH_TYPE_VSI); /* If VSI node doesn't exist, add one */ if (!vsi_node) { ibdev_dbg(to_ibdev(vsi->dev), "WS: Node not found matching VSI %d\n", vsi->vsi_idx); vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT, ws_tree_root); if (!vsi_node) { ret = -ENOMEM; goto vsi_add_err; } ret = irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_ADD_NODE); if (ret) { irdma_free_node(vsi, vsi_node); goto vsi_add_err; } list_add(&vsi_node->siblings, &ws_tree_root->child_list_head); } ibdev_dbg(to_ibdev(vsi->dev), "WS: Using node %d which represents VSI %d\n", vsi_node->index, vsi->vsi_idx); traffic_class = vsi->qos[user_pri].traffic_class; tc_node = ws_find_node(vsi_node, traffic_class, WS_MATCH_TYPE_TC); if (!tc_node) { 
/* Add leaf node */ ibdev_dbg(to_ibdev(vsi->dev), "WS: Node not found matching VSI %d and TC %d\n", vsi->vsi_idx, traffic_class); tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF, vsi_node); if (!tc_node) { ret = -ENOMEM; goto leaf_add_err; } ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_ADD_NODE); if (ret) { irdma_free_node(vsi, tc_node); goto leaf_add_err; } list_add(&tc_node->siblings, &vsi_node->child_list_head); /* * callback to LAN to update the LAN tree with our node */ ret = vsi->register_qset(vsi, tc_node); if (ret) goto reg_err; tc_node->enable = true; ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE); if (ret) { vsi->unregister_qset(vsi, tc_node); goto reg_err; } } ibdev_dbg(to_ibdev(vsi->dev), "WS: Using node %d which represents VSI %d TC %d\n", tc_node->index, vsi->vsi_idx, traffic_class); /* * Iterate through other UPs and update the QS handle if they have * a matching traffic class. */ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { if (vsi->qos[i].traffic_class == traffic_class) { vsi->qos[i].qs_handle = tc_node->qs_handle; vsi->qos[i].lan_qos_handle = tc_node->lan_qs_handle; vsi->qos[i].l2_sched_node_id = tc_node->l2_sched_node_id; vsi->qos[i].valid = true; } } goto exit; reg_err: irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE); list_del(&tc_node->siblings); irdma_free_node(vsi, tc_node); leaf_add_err: if (list_empty(&vsi_node->child_list_head)) { if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE)) goto exit; list_del(&vsi_node->siblings); irdma_free_node(vsi, vsi_node); } vsi_add_err: /* Free head node there are no remaining VSI nodes */ if (list_empty(&ws_tree_root->child_list_head)) { irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_DELETE_NODE); vsi->dev->ws_tree_root = NULL; irdma_free_node(vsi, ws_tree_root); } exit: mutex_unlock(&vsi->dev->ws_mutex); return ret; } /** * irdma_ws_remove - Free WS scheduler node, update WS tree * @vsi: vsi pointer * @user_pri: user priority */ void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri) { mutex_lock(&vsi->dev->ws_mutex); if (irdma_tc_in_use(vsi, user_pri)) goto exit; irdma_remove_leaf(vsi, user_pri); exit: mutex_unlock(&vsi->dev->ws_mutex); } /** * irdma_ws_reset - Reset entire WS tree * @vsi: vsi pointer */ void irdma_ws_reset(struct irdma_sc_vsi *vsi) { u8 i; mutex_lock(&vsi->dev->ws_mutex); for (i = 0; i < IRDMA_MAX_USER_PRIORITY; ++i) irdma_remove_leaf(vsi, i); mutex_unlock(&vsi->dev->ws_mutex); }
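/*
 * Userspace sketch (not kernel code) of the three-level work scheduler
 * tree that irdma_ws_add() above maintains: a single root, one child per
 * VSI, and one leaf per traffic class under that VSI.  It mirrors the
 * ws_find_node() "walk the child list and match on VSI index or TC"
 * lookup with plain arrays; the kernel version uses list_head siblings
 * plus CQP commands, which this sketch deliberately omits.
 */
#include <stdio.h>

#define MAX_CHILDREN 8

struct sketch_node {
	int match_val;                        /* VSI index or traffic class */
	int nchildren;
	struct sketch_node *child[MAX_CHILDREN];
};

static struct sketch_node *sketch_find(struct sketch_node *parent, int val)
{
	int i;

	for (i = 0; i < parent->nchildren; i++)
		if (parent->child[i]->match_val == val)
			return parent->child[i];
	return NULL;
}

int main(void)
{
	struct sketch_node tc1 = { .match_val = 1 };           /* TC leaf   */
	struct sketch_node vsi5 = { .match_val = 5, .nchildren = 1,
				    .child = { &tc1 } };       /* VSI node  */
	struct sketch_node root = { .match_val = 0, .nchildren = 1,
				    .child = { &vsi5 } };      /* tree root */
	struct sketch_node *vsi = sketch_find(&root, 5);
	struct sketch_node *leaf = vsi ? sketch_find(vsi, 1) : NULL;

	printf("found VSI node: %s, TC leaf: %s\n",
	       vsi ? "yes" : "no", leaf ? "yes" : "no");
	return 0;
}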
linux-master
drivers/infiniband/hw/irdma/ws.c
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2017 - 2021 Intel Corporation */ #include "osdep.h" #include "type.h" #include "icrdma_hw.h" static u32 icrdma_regs[IRDMA_MAX_REGS] = { PFPE_CQPTAIL, PFPE_CQPDB, PFPE_CCQPSTATUS, PFPE_CCQPHIGH, PFPE_CCQPLOW, PFPE_CQARM, PFPE_CQACK, PFPE_AEQALLOC, PFPE_CQPERRCODES, PFPE_WQEALLOC, GLINT_DYN_CTL(0), ICRDMA_DB_ADDR_OFFSET, GLPCI_LBARCTRL, GLPE_CPUSTATUS0, GLPE_CPUSTATUS1, GLPE_CPUSTATUS2, PFINT_AEQCTL, GLINT_CEQCTL(0), VSIQF_PE_CTL1(0), PFHMC_PDINV, GLHMC_VFPDINV(0), GLPE_CRITERR, GLINT_RATE(0), }; static u64 icrdma_masks[IRDMA_MAX_MASKS] = { ICRDMA_CCQPSTATUS_CCQP_DONE, ICRDMA_CCQPSTATUS_CCQP_ERR, ICRDMA_CQPSQ_STAG_PDID, ICRDMA_CQPSQ_CQ_CEQID, ICRDMA_CQPSQ_CQ_CQID, ICRDMA_COMMIT_FPM_CQCNT, }; static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = { ICRDMA_CCQPSTATUS_CCQP_DONE_S, ICRDMA_CCQPSTATUS_CCQP_ERR_S, ICRDMA_CQPSQ_STAG_PDID_S, ICRDMA_CQPSQ_CQ_CEQID_S, ICRDMA_CQPSQ_CQ_CQID_S, ICRDMA_COMMIT_FPM_CQCNT_S, }; /** * icrdma_ena_irq - Enable interrupt * @dev: pointer to the device structure * @idx: vector index */ static void icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx) { u32 val; u32 interval = 0; if (dev->ceq_itr && dev->aeq->msix_idx != idx) interval = dev->ceq_itr >> 1; /* 2 usec units */ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0) | FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) | FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) | FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1); if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx); else writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1)); } /** * icrdma_disable_irq - Disable interrupt * @dev: pointer to the device structure * @idx: vector index */ static void icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx) { if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx); else writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1)); } /** * icrdma_cfg_ceq- Configure CEQ interrupt * @dev: pointer to the device structure * @ceq_id: Completion Event Queue ID * @idx: vector index * @enable: True to enable, False disables */ static void icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx, bool enable) { u32 reg_val; reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) | FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) | FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 3); writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id); } static const struct irdma_irq_ops icrdma_irq_ops = { .irdma_cfg_aeq = irdma_cfg_aeq, .irdma_cfg_ceq = icrdma_cfg_ceq, .irdma_dis_irq = icrdma_disable_irq, .irdma_en_irq = icrdma_ena_irq, }; static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = { [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 32, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 32, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 24, 0, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 32, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 40, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 32, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 72, 0, IRDMA_MAX_STATS_32 }, 
[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 80, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 88, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 128, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 136, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 144, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 152, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 160, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 168, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 176, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 184, 32, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 184, 0, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 192, 32, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 200, 32, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 32, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 224, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 232, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 240, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 248, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 256, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 264, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 272, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 280, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 288, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 296, 0, IRDMA_MAX_STATS_48 }, [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 304, 0, IRDMA_MAX_STATS_56 }, [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 312, 32, IRDMA_MAX_STATS_24 }, [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 312, 0, IRDMA_MAX_STATS_32 }, [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 320, 0, IRDMA_MAX_STATS_32 }, }; void icrdma_init_hw(struct irdma_sc_dev *dev) { int i; u8 __iomem *hw_addr; for (i = 0; i < IRDMA_MAX_REGS; ++i) { hw_addr = dev->hw->hw_addr; if (i == IRDMA_DB_ADDR_OFFSET) hw_addr = NULL; dev->hw_regs[i] = (u32 __iomem *)(hw_addr + icrdma_regs[i]); } dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID; dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID; for (i = 0; i < IRDMA_MAX_SHIFTS; ++i) dev->hw_shifts[i] = icrdma_shifts[i]; for (i = 0; i < IRDMA_MAX_MASKS; ++i) dev->hw_masks[i] = icrdma_masks[i]; dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC]; dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM]; dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC]; dev->cqp_db = dev->hw_regs[IRDMA_CQPDB]; dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK]; dev->irq_ops = &icrdma_irq_ops; dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G; dev->hw_stats_map = icrdma_hw_stat_map; dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE; dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE; dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT; dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2; dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE; dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR; dev->hw_attrs.uk_attrs.feature_flags |= 
IRDMA_FEATURE_RTS_AE | IRDMA_FEATURE_CQ_RESIZE; }
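/*
 * Userspace sketch (not kernel code) of how a { byte offset, bit offset,
 * width } triple like the icrdma_hw_stat_map[] entries above can be used
 * to pull one hardware counter out of a 64-bit gather word.  The sample
 * values are invented; the real driver additionally handles counter
 * roll-over when it applies the map to freshly gathered statistics.
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_stat_map {
	unsigned int byteoff;   /* offset of the u64 in the gather buffer */
	unsigned int bitoff;    /* bit offset of the counter inside that u64 */
	unsigned int width;     /* counter width in bits, e.g. 24/32/48 */
};

static uint64_t sketch_read_stat(const uint64_t *gather_buf,
				 const struct sketch_stat_map *m)
{
	uint64_t qword = gather_buf[m->byteoff / sizeof(uint64_t)];
	uint64_t mask = (m->width == 64) ? ~0ull : ((1ull << m->width) - 1);

	return (qword >> m->bitoff) & mask;
}

int main(void)
{
	/* pretend word 0 holds a 24-bit counter in bits 32..55, value 0x1234 */
	uint64_t gather_buf[1] = { 0x1234ull << 32 };
	struct sketch_stat_map rxvlanerr = { .byteoff = 0, .bitoff = 32, .width = 24 };

	printf("counter = 0x%llx\n",
	       (unsigned long long)sketch_read_stat(gather_buf, &rxvlanerr));
	return 0;
}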
linux-master
drivers/infiniband/hw/irdma/icrdma_hw.c
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" /** * irdma_query_device - get device attributes * @ibdev: device pointer from stack * @props: returning device attributes * @udata: user data */ static int irdma_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *udata) { struct irdma_device *iwdev = to_iwdev(ibdev); struct irdma_pci_f *rf = iwdev->rf; struct pci_dev *pcidev = iwdev->rf->pcidev; struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs; if (udata->inlen || udata->outlen) return -EINVAL; memset(props, 0, sizeof(*props)); addrconf_addr_eui48((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | irdma_fw_minor_ver(&rf->sc_dev); props->device_cap_flags = IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS; props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; props->vendor_id = pcidev->vendor; props->vendor_part_id = pcidev->device; props->hw_ver = rf->pcidev->revision; props->page_size_cap = hw_attrs->page_size_cap; props->max_mr_size = hw_attrs->max_mr_size; props->max_qp = rf->max_qp - rf->used_qps; props->max_qp_wr = hw_attrs->max_qp_wr; props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags; props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags; props->max_cq = rf->max_cq - rf->used_cqs; props->max_cqe = rf->max_cqe - 1; props->max_mr = rf->max_mr - rf->used_mrs; props->max_mw = props->max_mr; props->max_pd = rf->max_pd - rf->used_pds; props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges; props->max_qp_rd_atom = hw_attrs->max_hw_ird; props->max_qp_init_rd_atom = hw_attrs->max_hw_ord; if (rdma_protocol_roce(ibdev, 1)) { props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN; props->max_pkeys = IRDMA_PKEY_TBL_SZ; } props->max_ah = rf->max_ah; props->max_mcast_grp = rf->max_mcg; props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX; props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX; props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR; #define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2) props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK; return 0; } /** * irdma_query_port - get port attributes * @ibdev: device pointer from stack * @port: port number for query * @props: returning device attributes */ static int irdma_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { struct irdma_device *iwdev = to_iwdev(ibdev); struct net_device *netdev = iwdev->netdev; /* no need to zero out pros here. 
done by caller */ props->max_mtu = IB_MTU_4096; props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); props->lid = 1; props->lmc = 0; props->sm_lid = 0; props->sm_sl = 0; if (netif_carrier_ok(netdev) && netif_running(netdev)) { props->state = IB_PORT_ACTIVE; props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } else { props->state = IB_PORT_DOWN; props->phys_state = IB_PORT_PHYS_STATE_DISABLED; } ib_get_eth_speed(ibdev, port, &props->active_speed, &props->active_width); if (rdma_protocol_roce(ibdev, 1)) { props->gid_tbl_len = 32; props->ip_gids = true; props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ; } else { props->gid_tbl_len = 1; } props->qkey_viol_cntr = 0; props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP; props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size; return 0; } /** * irdma_disassociate_ucontext - Disassociate user context * @context: ib user context */ static void irdma_disassociate_ucontext(struct ib_ucontext *context) { } static int irdma_mmap_legacy(struct irdma_ucontext *ucontext, struct vm_area_struct *vma) { u64 pfn; if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; vma->vm_private_data = ucontext; pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] + pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT; return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE, pgprot_noncached(vma->vm_page_prot), NULL); } static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry) { struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry); kfree(entry); } static struct rdma_user_mmap_entry* irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset, enum irdma_mmap_flag mmap_flag, u64 *mmap_offset) { struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL); int ret; if (!entry) return NULL; entry->bar_offset = bar_offset; entry->mmap_flag = mmap_flag; ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, &entry->rdma_entry, PAGE_SIZE); if (ret) { kfree(entry); return NULL; } *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); return &entry->rdma_entry; } /** * irdma_mmap - user memory map * @context: context created during alloc * @vma: kernel info for user memory map */ static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct rdma_user_mmap_entry *rdma_entry; struct irdma_user_mmap_entry *entry; struct irdma_ucontext *ucontext; u64 pfn; int ret; ucontext = to_ucontext(context); /* Legacy support for libi40iw with hard-coded mmap key */ if (ucontext->legacy_mode) return irdma_mmap_legacy(ucontext, vma); rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma); if (!rdma_entry) { ibdev_dbg(&ucontext->iwdev->ibdev, "VERBS: pgoff[0x%lx] does not have valid entry\n", vma->vm_pgoff); return -EINVAL; } entry = to_irdma_mmap_entry(rdma_entry); ibdev_dbg(&ucontext->iwdev->ibdev, "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n", entry->bar_offset, entry->mmap_flag); pfn = (entry->bar_offset + pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT; switch (entry->mmap_flag) { case IRDMA_MMAP_IO_NC: ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, pgprot_noncached(vma->vm_page_prot), rdma_entry); break; case IRDMA_MMAP_IO_WC: ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot), rdma_entry); break; default: ret = -EINVAL; } if (ret) ibdev_dbg(&ucontext->iwdev->ibdev, "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n", entry->bar_offset, 
entry->mmap_flag, ret); rdma_user_mmap_entry_put(rdma_entry); return ret; } /** * irdma_alloc_push_page - allocate a push page for qp * @iwqp: qp pointer */ static void irdma_alloc_push_page(struct irdma_qp *iwqp) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_device *iwdev = iwqp->iwdev; struct irdma_sc_qp *qp = &iwqp->sc_qp; int status; cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); if (!cqp_request) return; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE; cqp_info->post_sq = 1; cqp_info->in.u.manage_push_page.info.push_idx = 0; cqp_info->in.u.manage_push_page.info.qs_handle = qp->vsi->qos[qp->user_pri].qs_handle; cqp_info->in.u.manage_push_page.info.free_page = 0; cqp_info->in.u.manage_push_page.info.push_page_type = 0; cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp; cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(iwdev->rf, cqp_request); if (!status && cqp_request->compl_info.op_ret_val < iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) { qp->push_idx = cqp_request->compl_info.op_ret_val; qp->push_offset = 0; } irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); } /** * irdma_alloc_ucontext - Allocate the user context data structure * @uctx: uverbs context pointer * @udata: user data * * This keeps track of all objects associated with a particular * user-mode client. */ static int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8) #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd) struct ib_device *ibdev = uctx->device; struct irdma_device *iwdev = to_iwdev(ibdev); struct irdma_alloc_ucontext_req req = {}; struct irdma_alloc_ucontext_resp uresp = {}; struct irdma_ucontext *ucontext = to_ucontext(uctx); struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) return -EINVAL; if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) return -EINVAL; if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER) goto ver_error; ucontext->iwdev = iwdev; ucontext->abi_ver = req.userspace_ver; if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR) ucontext->use_raw_attrs = true; /* GEN_1 legacy support with libi40iw */ if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { if (uk_attrs->hw_rev != IRDMA_GEN_1) return -EOPNOTSUPP; ucontext->legacy_mode = true; uresp.max_qps = iwdev->rf->max_qp; uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds; uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2; uresp.kernel_ver = req.userspace_ver; if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) return -EFAULT; } else { u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; ucontext->db_mmap_entry = irdma_user_mmap_entry_insert(ucontext, bar_off, IRDMA_MMAP_IO_NC, &uresp.db_mmap_key); if (!ucontext->db_mmap_entry) return -ENOMEM; uresp.kernel_ver = IRDMA_ABI_VER; uresp.feature_flags = uk_attrs->feature_flags; uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; uresp.max_hw_inline = uk_attrs->max_hw_inline; uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; uresp.max_hw_cq_size = 
uk_attrs->max_hw_cq_size; uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; uresp.hw_rev = uk_attrs->hw_rev; uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR; uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size; uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE; if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) { rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); return -EFAULT; } } INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); spin_lock_init(&ucontext->cq_reg_mem_list_lock); INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); spin_lock_init(&ucontext->qp_reg_mem_list_lock); return 0; ver_error: ibdev_err(&iwdev->ibdev, "Invalid userspace driver version detected. Detected version %d, should be %d\n", req.userspace_ver, IRDMA_ABI_VER); return -EINVAL; } /** * irdma_dealloc_ucontext - deallocate the user context data structure * @context: user context created during alloc */ static void irdma_dealloc_ucontext(struct ib_ucontext *context) { struct irdma_ucontext *ucontext = to_ucontext(context); rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); } /** * irdma_alloc_pd - allocate protection domain * @pd: PD pointer * @udata: user data */ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) { #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd) struct irdma_pd *iwpd = to_iwpd(pd); struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; struct irdma_pci_f *rf = iwdev->rf; struct irdma_alloc_pd_resp uresp = {}; struct irdma_sc_pd *sc_pd; u32 pd_id = 0; int err; if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN) return -EINVAL; err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, &rf->next_pd); if (err) return err; sc_pd = &iwpd->sc_pd; if (udata) { struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); uresp.pd_id = pd_id; if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) { err = -EFAULT; goto error; } } else { irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER); } return 0; error: irdma_free_rsrc(rf, rf->allocated_pds, pd_id); return err; } /** * irdma_dealloc_pd - deallocate pd * @ibpd: ptr of pd to be deallocated * @udata: user data */ static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct irdma_pd *iwpd = to_iwpd(ibpd); struct irdma_device *iwdev = to_iwdev(ibpd->device); irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); return 0; } /** * irdma_get_pbl - Retrieve pbl from a list given a virtual * address * @va: user virtual address * @pbl_list: pbl list to search in (QP's or CQ's) */ static struct irdma_pbl *irdma_get_pbl(unsigned long va, struct list_head *pbl_list) { struct irdma_pbl *iwpbl; list_for_each_entry (iwpbl, pbl_list, list) { if (iwpbl->user_base == va) { list_del(&iwpbl->list); iwpbl->on_list = false; return iwpbl; } } return NULL; } /** * irdma_clean_cqes - clean cq entries for qp * @iwqp: qp ptr (user or kernel) * @iwcq: cq ptr */ static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq) { struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk; unsigned long flags; spin_lock_irqsave(&iwcq->lock, flags); irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq); spin_unlock_irqrestore(&iwcq->lock, flags); } static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp) { if (iwqp->push_db_mmap_entry) { rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry); iwqp->push_db_mmap_entry = NULL; } 
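	/* the matching push WQE entry (created together with the doorbell
	 * entry in irdma_setup_push_mmap_entries()) is removed below
	 */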
if (iwqp->push_wqe_mmap_entry) { rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); iwqp->push_wqe_mmap_entry = NULL; } } static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext, struct irdma_qp *iwqp, u64 *push_wqe_mmap_key, u64 *push_db_mmap_key) { struct irdma_device *iwdev = ucontext->iwdev; u64 rsvd, bar_off; rsvd = IRDMA_PF_BAR_RSVD; bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; /* skip over db page */ bar_off += IRDMA_HW_PAGE_SIZE; /* push wqe page */ bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE; iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext, bar_off, IRDMA_MMAP_IO_WC, push_wqe_mmap_key); if (!iwqp->push_wqe_mmap_entry) return -ENOMEM; /* push doorbell page */ bar_off += IRDMA_HW_PAGE_SIZE; iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext, bar_off, IRDMA_MMAP_IO_NC, push_db_mmap_key); if (!iwqp->push_db_mmap_entry) { rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); return -ENOMEM; } return 0; } /** * irdma_destroy_qp - destroy qp * @ibqp: qp's ib pointer also to get to device's qp address * @udata: user data */ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_device *iwdev = iwqp->iwdev; iwqp->sc_qp.qp_uk.destroy_pending = true; if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) irdma_modify_qp_to_err(&iwqp->sc_qp); if (!iwqp->user_mode) cancel_delayed_work_sync(&iwqp->dwork_flush); if (!iwqp->user_mode) { if (iwqp->iwscq) { irdma_clean_cqes(iwqp, iwqp->iwscq); if (iwqp->iwrcq != iwqp->iwscq) irdma_clean_cqes(iwqp, iwqp->iwrcq); } } irdma_qp_rem_ref(&iwqp->ibqp); wait_for_completion(&iwqp->free_qp); irdma_free_lsmm_rsrc(iwqp); irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); irdma_remove_push_mmap_entries(iwqp); irdma_free_qp_rsrc(iwqp); return 0; } /** * irdma_setup_virt_qp - setup for allocation of virtual qp * @iwdev: irdma device * @iwqp: qp ptr * @init_info: initialize info to return */ static void irdma_setup_virt_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, struct irdma_qp_init_info *init_info) { struct irdma_pbl *iwpbl = iwqp->iwpbl; struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; iwqp->page = qpmr->sq_page; init_info->shadow_area_pa = qpmr->shadow; if (iwpbl->pbl_allocated) { init_info->virtual_map = true; init_info->sq_pa = qpmr->sq_pbl.idx; init_info->rq_pa = qpmr->rq_pbl.idx; } else { init_info->sq_pa = qpmr->sq_pbl.addr; init_info->rq_pa = qpmr->rq_pbl.addr; } } /** * irdma_setup_umode_qp - setup sq and rq size in user mode qp * @udata: udata * @iwdev: iwarp device * @iwqp: qp ptr (user or kernel) * @info: initialize info to return * @init_attr: Initial QP create attributes */ static int irdma_setup_umode_qp(struct ib_udata *udata, struct irdma_device *iwdev, struct irdma_qp *iwqp, struct irdma_qp_init_info *info, struct ib_qp_init_attr *init_attr) { struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; struct irdma_create_qp_req req; unsigned long flags; int ret; ret = ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)); if (ret) { ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n"); return ret; } iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; iwqp->user_mode = 1; if (req.user_wqe_bufs) { info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode; spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); iwqp->iwpbl = irdma_get_pbl((unsigned 
long)req.user_wqe_bufs, &ucontext->qp_reg_mem_list); spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); if (!iwqp->iwpbl) { ret = -ENODATA; ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n"); return ret; } } if (!ucontext->use_raw_attrs) { /** * Maintain backward compat with older ABI which passes sq and * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr. * There is no way to compute the correct value of * iwqp->max_send_wr/max_recv_wr in the kernel. */ iwqp->max_send_wr = init_attr->cap.max_send_wr; iwqp->max_recv_wr = init_attr->cap.max_recv_wr; ukinfo->sq_size = init_attr->cap.max_send_wr; ukinfo->rq_size = init_attr->cap.max_recv_wr; irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, &ukinfo->rq_shift); } else { ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth, &ukinfo->sq_shift); if (ret) return ret; ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth, &ukinfo->rq_shift); if (ret) return ret; iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift; iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift; ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift; ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift; } irdma_setup_virt_qp(iwdev, iwqp, info); return 0; } /** * irdma_setup_kmode_qp - setup initialization for kernel mode qp * @iwdev: iwarp device * @iwqp: qp ptr (user or kernel) * @info: initialize info to return * @init_attr: Initial QP create attributes */ static int irdma_setup_kmode_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, struct irdma_qp_init_info *info, struct ib_qp_init_attr *init_attr) { struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem; u32 size; int status; struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth, &ukinfo->sq_shift); if (status) return status; status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth, &ukinfo->rq_shift); if (status) return status; iwqp->kqp.sq_wrid_mem = kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL); if (!iwqp->kqp.sq_wrid_mem) return -ENOMEM; iwqp->kqp.rq_wrid_mem = kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL); if (!iwqp->kqp.rq_wrid_mem) { kfree(iwqp->kqp.sq_wrid_mem); iwqp->kqp.sq_wrid_mem = NULL; return -ENOMEM; } ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem; ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem; size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE; size += (IRDMA_SHADOW_AREA_SIZE << 3); mem->size = ALIGN(size, 256); mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size, &mem->pa, GFP_KERNEL); if (!mem->va) { kfree(iwqp->kqp.sq_wrid_mem); iwqp->kqp.sq_wrid_mem = NULL; kfree(iwqp->kqp.rq_wrid_mem); iwqp->kqp.rq_wrid_mem = NULL; return -ENOMEM; } ukinfo->sq = mem->va; info->sq_pa = mem->pa; ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth]; info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE); ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem; info->shadow_area_pa = info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE); ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift; ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift; ukinfo->qp_id = iwqp->ibqp.qp_num; iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift; iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift; init_attr->cap.max_send_wr = iwqp->max_send_wr; init_attr->cap.max_recv_wr = iwqp->max_recv_wr; return 0; } static int irdma_cqp_create_qp_cmd(struct 
irdma_qp *iwqp) { struct irdma_pci_f *rf = iwqp->iwdev->rf; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_create_qp_info *qp_info; int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; qp_info = &cqp_request->info.in.u.qp_create.info; memset(qp_info, 0, sizeof(*qp_info)); qp_info->mac_valid = true; qp_info->cq_num_valid = true; qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE; cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE; cqp_info->post_sq = 1; cqp_info->in.u.qp_create.qp = &iwqp->sc_qp; cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); return status; } static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp, struct irdma_qp_host_ctx_info *ctx_info) { struct irdma_device *iwdev = iwqp->iwdev; struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; struct irdma_roce_offload_info *roce_info; struct irdma_udp_offload_info *udp_info; udp_info = &iwqp->udp_info; udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu)); udp_info->cwnd = iwdev->roce_cwnd; udp_info->rexmit_thresh = 2; udp_info->rnr_nak_thresh = 2; udp_info->src_port = 0xc000; udp_info->dst_port = ROCE_V2_UDP_DPORT; roce_info = &iwqp->roce_info; ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr); roce_info->rd_en = true; roce_info->wr_rdresp_en = true; roce_info->bind_en = true; roce_info->dcqcn_en = false; roce_info->rtomin = 5; roce_info->ack_credits = iwdev->roce_ackcreds; roce_info->ird_size = dev->hw_attrs.max_hw_ird; roce_info->ord_size = dev->hw_attrs.max_hw_ord; if (!iwqp->user_mode) { roce_info->priv_mode_en = true; roce_info->fast_reg_en = true; roce_info->udprivcq_en = true; } roce_info->roce_tver = 0; ctx_info->roce_info = &iwqp->roce_info; ctx_info->udp_info = &iwqp->udp_info; irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); } static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp, struct irdma_qp_host_ctx_info *ctx_info) { struct irdma_device *iwdev = iwqp->iwdev; struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; struct irdma_iwarp_offload_info *iwarp_info; iwarp_info = &iwqp->iwarp_info; ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr); iwarp_info->rd_en = true; iwarp_info->wr_rdresp_en = true; iwarp_info->bind_en = true; iwarp_info->ecn_en = true; iwarp_info->rtomin = 5; if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) iwarp_info->ib_rd_en = true; if (!iwqp->user_mode) { iwarp_info->priv_mode_en = true; iwarp_info->fast_reg_en = true; } iwarp_info->ddp_ver = 1; iwarp_info->rdmap_ver = 1; ctx_info->iwarp_info = &iwqp->iwarp_info; ctx_info->iwarp_info_valid = true; irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); ctx_info->iwarp_info_valid = false; } static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr, struct irdma_device *iwdev) { struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; if (init_attr->create_flags) return -EOPNOTSUPP; if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags) return -EINVAL; if (rdma_protocol_roce(&iwdev->ibdev, 1)) { if (init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD && init_attr->qp_type != IB_QPT_GSI) return -EOPNOTSUPP; } else { if (init_attr->qp_type != IB_QPT_RC) return 
-EOPNOTSUPP; } return 0; } static void irdma_flush_worker(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush); irdma_generate_flush_completions(iwqp); } /** * irdma_create_qp - create qp * @ibqp: ptr of qp * @init_attr: attributes for qp * @udata: user data for create qp */ static int irdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx) #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd) struct ib_pd *ibpd = ibqp->pd; struct irdma_pd *iwpd = to_iwpd(ibpd); struct irdma_device *iwdev = to_iwdev(ibpd->device); struct irdma_pci_f *rf = iwdev->rf; struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_create_qp_resp uresp = {}; u32 qp_num = 0; int err_code; struct irdma_sc_qp *qp; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; struct irdma_qp_init_info init_info = {}; struct irdma_qp_host_ctx_info *ctx_info; err_code = irdma_validate_qp_attrs(init_attr, iwdev); if (err_code) return err_code; if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN || udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN)) return -EINVAL; init_info.vsi = &iwdev->vsi; init_info.qp_uk_init_info.uk_attrs = uk_attrs; init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr; init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr; init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data; qp = &iwqp->sc_qp; qp->qp_uk.back_qp = iwqp; qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX; iwqp->iwdev = iwdev; iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE, 256); iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device, iwqp->q2_ctx_mem.size, &iwqp->q2_ctx_mem.pa, GFP_KERNEL); if (!iwqp->q2_ctx_mem.va) return -ENOMEM; init_info.q2 = iwqp->q2_ctx_mem.va; init_info.q2_pa = iwqp->q2_ctx_mem.pa; init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE); init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE; if (init_attr->qp_type == IB_QPT_GSI) qp_num = 1; else err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp, &qp_num, &rf->next_qp); if (err_code) goto error; iwqp->iwpd = iwpd; iwqp->ibqp.qp_num = qp_num; qp = &iwqp->sc_qp; iwqp->iwscq = to_iwcq(init_attr->send_cq); iwqp->iwrcq = to_iwcq(init_attr->recv_cq); iwqp->host_ctx.va = init_info.host_ctx; iwqp->host_ctx.pa = init_info.host_ctx_pa; iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE; init_info.pd = &iwpd->sc_pd; init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; if (!rdma_protocol_roce(&iwdev->ibdev, 1)) init_info.qp_uk_init_info.first_sq_wq = 1; iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; init_waitqueue_head(&iwqp->waitq); init_waitqueue_head(&iwqp->mod_qp_waitq); if (udata) { init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr); } else { INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker); init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER; err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr); } if (err_code) { ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n"); goto error; } if (rdma_protocol_roce(&iwdev->ibdev, 1)) { if (init_attr->qp_type == IB_QPT_RC) { 
init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC; init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | IRDMA_WRITE_WITH_IMM | IRDMA_ROCE; } else { init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD; init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | IRDMA_ROCE; } } else { init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP; init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM; } if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE; err_code = irdma_sc_qp_init(qp, &init_info); if (err_code) { ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n"); goto error; } ctx_info = &iwqp->ctx_info; ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; if (rdma_protocol_roce(&iwdev->ibdev, 1)) irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info); else irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info); err_code = irdma_cqp_create_qp_cmd(iwqp); if (err_code) goto error; refcount_set(&iwqp->refcnt, 1); spin_lock_init(&iwqp->lock); spin_lock_init(&iwqp->sc_qp.pfpdu.lock); iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; rf->qp_table[qp_num] = iwqp; if (rdma_protocol_roce(&iwdev->ibdev, 1)) { if (dev->ws_add(&iwdev->vsi, 0)) { irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp); err_code = -EINVAL; goto error; } irdma_qp_add_qos(&iwqp->sc_qp); } if (udata) { /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */ if (udata->outlen < sizeof(uresp)) { uresp.lsmm = 1; uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1; } else { if (rdma_protocol_iwarp(&iwdev->ibdev, 1)) uresp.lsmm = 1; } uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size; uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size; uresp.qp_id = qp_num; uresp.qp_caps = qp->qp_uk.qp_caps; err_code = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)); if (err_code) { ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n"); irdma_destroy_qp(&iwqp->ibqp, udata); return err_code; } } init_completion(&iwqp->free_qp); return 0; error: irdma_free_qp_rsrc(iwqp); return err_code; } static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp) { int acc_flags = 0; if (rdma_protocol_roce(iwqp->ibqp.device, 1)) { if (iwqp->roce_info.wr_rdresp_en) { acc_flags |= IB_ACCESS_LOCAL_WRITE; acc_flags |= IB_ACCESS_REMOTE_WRITE; } if (iwqp->roce_info.rd_en) acc_flags |= IB_ACCESS_REMOTE_READ; if (iwqp->roce_info.bind_en) acc_flags |= IB_ACCESS_MW_BIND; } else { if (iwqp->iwarp_info.wr_rdresp_en) { acc_flags |= IB_ACCESS_LOCAL_WRITE; acc_flags |= IB_ACCESS_REMOTE_WRITE; } if (iwqp->iwarp_info.rd_en) acc_flags |= IB_ACCESS_REMOTE_READ; if (iwqp->iwarp_info.bind_en) acc_flags |= IB_ACCESS_MW_BIND; } return acc_flags; } /** * irdma_query_qp - query qp attributes * @ibqp: qp pointer * @attr: attributes pointer * @attr_mask: Not used * @init_attr: qp attributes to return */ static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) { struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_sc_qp *qp = &iwqp->sc_qp; memset(attr, 0, sizeof(*attr)); memset(init_attr, 0, sizeof(*init_attr)); attr->qp_state = iwqp->ibqp_state; attr->cur_qp_state = iwqp->ibqp_state; attr->cap.max_send_wr = iwqp->max_send_wr; attr->cap.max_recv_wr = iwqp->max_recv_wr; attr->cap.max_inline_data = qp->qp_uk.max_inline_data; attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt; attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt; attr->qp_access_flags = 
irdma_get_ib_acc_flags(iwqp); attr->port_num = 1; if (rdma_protocol_roce(ibqp->device, 1)) { attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss); attr->qkey = iwqp->roce_info.qkey; attr->rq_psn = iwqp->udp_info.epsn; attr->sq_psn = iwqp->udp_info.psn_nxt; attr->dest_qp_num = iwqp->roce_info.dest_qp; attr->pkey_index = iwqp->roce_info.p_key; attr->retry_cnt = iwqp->udp_info.rexmit_thresh; attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh; attr->max_rd_atomic = iwqp->roce_info.ord_size; attr->max_dest_rd_atomic = iwqp->roce_info.ird_size; } init_attr->event_handler = iwqp->ibqp.event_handler; init_attr->qp_context = iwqp->ibqp.qp_context; init_attr->send_cq = iwqp->ibqp.send_cq; init_attr->recv_cq = iwqp->ibqp.recv_cq; init_attr->cap = attr->cap; return 0; } /** * irdma_query_pkey - Query partition key * @ibdev: device pointer from stack * @port: port number * @index: index of pkey * @pkey: pointer to store the pkey */ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) { if (index >= IRDMA_PKEY_TBL_SZ) return -EINVAL; *pkey = IRDMA_DEFAULT_PKEY; return 0; } static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio) { struct net_device *ndev; rcu_read_lock(); ndev = rcu_dereference(attr->ndev); if (!ndev) goto exit; if (is_vlan_dev(ndev)) { u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio); prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } exit: rcu_read_unlock(); return prio; } /** * irdma_modify_qp_roce - modify qp request * @ibqp: qp's pointer for modify * @attr: access attributes * @attr_mask: state mask * @udata: user data */ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush) #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid) struct irdma_pd *iwpd = to_iwpd(ibqp->pd); struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_device *iwdev = iwqp->iwdev; struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; struct irdma_qp_host_ctx_info *ctx_info; struct irdma_roce_offload_info *roce_info; struct irdma_udp_offload_info *udp_info; struct irdma_modify_qp_info info = {}; struct irdma_modify_qp_resp uresp = {}; struct irdma_modify_qp_req ureq = {}; unsigned long flags; u8 issue_modify_qp = 0; int ret = 0; ctx_info = &iwqp->ctx_info; roce_info = &iwqp->roce_info; udp_info = &iwqp->udp_info; if (udata) { /* udata inlen/outlen can be 0 when supporting legacy libi40iw */ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) || (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN)) return -EINVAL; } if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; if (attr_mask & IB_QP_DEST_QPN) roce_info->dest_qp = attr->dest_qp_num; if (attr_mask & IB_QP_PKEY_INDEX) { ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index, &roce_info->p_key); if (ret) return ret; } if (attr_mask & IB_QP_QKEY) roce_info->qkey = attr->qkey; if (attr_mask & IB_QP_PATH_MTU) udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu); if (attr_mask & IB_QP_SQ_PSN) { udp_info->psn_nxt = attr->sq_psn; udp_info->lsn = 0xffff; udp_info->psn_una = attr->sq_psn; udp_info->psn_max = attr->sq_psn; } if (attr_mask & IB_QP_RQ_PSN) udp_info->epsn = attr->rq_psn; if (attr_mask & IB_QP_RNR_RETRY) udp_info->rnr_nak_thresh = attr->rnr_retry; if (attr_mask & IB_QP_RETRY_CNT) udp_info->rexmit_thresh = attr->retry_cnt; ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id; if (attr_mask & 
IB_QP_AV) {
		struct irdma_av *av = &iwqp->roce_ah.av;
		const struct ib_gid_attr *sgid_attr =
				attr->ah_attr.grh.sgid_attr;
		u16 vlan_id = VLAN_N_VID;
		u32 local_ip[4];

		memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			udp_info->ttl = attr->ah_attr.grh.hop_limit;
			udp_info->flow_label = attr->ah_attr.grh.flow_label;
			udp_info->tos = attr->ah_attr.grh.traffic_class;
			udp_info->src_port =
				rdma_get_udp_sport(udp_info->flow_label,
						   ibqp->qp_num,
						   roce_info->dest_qp);
			irdma_qp_rem_qos(&iwqp->sc_qp);
			dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
			if (iwqp->sc_qp.vsi->dscp_mode)
				ctx_info->user_pri =
					iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
			else
				ctx_info->user_pri = rt_tos2priority(udp_info->tos);
		}
		ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
					      ctx_info->roce_info->mac_addr);
		if (ret)
			return ret;
		ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
							      ctx_info->user_pri);
		if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
			return -ENOMEM;
		iwqp->sc_qp.user_pri = ctx_info->user_pri;
		irdma_qp_add_qos(&iwqp->sc_qp);

		if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
			vlan_id = 0;
		if (vlan_id < VLAN_N_VID) {
			udp_info->insert_vlan_tag = true;
			udp_info->vlan_tag = vlan_id |
				ctx_info->user_pri << VLAN_PRIO_SHIFT;
		} else {
			udp_info->insert_vlan_tag = false;
		}

		av->attrs = attr->ah_attr;
		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
		rdma_gid2ip((struct sockaddr *)&av->dgid_addr,
			    &attr->ah_attr.grh.dgid);
		av->net_type = rdma_gid_attr_network_type(sgid_attr);
		if (av->net_type == RDMA_NETWORK_IPV6) {
			__be32 *daddr =
			    av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
			__be32 *saddr =
			    av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;

			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);

			udp_info->ipv4 = false;
			irdma_copy_ip_ntohl(local_ip, daddr);
		} else if (av->net_type == RDMA_NETWORK_IPV4) {
			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;

			local_ip[0] = ntohl(daddr);

			udp_info->ipv4 = true;
			udp_info->dest_ip_addr[0] = 0;
			udp_info->dest_ip_addr[1] = 0;
			udp_info->dest_ip_addr[2] = 0;
			udp_info->dest_ip_addr[3] = local_ip[0];

			udp_info->local_ipaddr[0] = 0;
			udp_info->local_ipaddr[1] = 0;
			udp_info->local_ipaddr[2] = 0;
			udp_info->local_ipaddr[3] = ntohl(saddr);
		}
		udp_info->arp_idx =
			irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
				      attr->ah_attr.roce.dmac);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
			ibdev_err(&iwdev->ibdev,
				  "rd_atomic = %d, above max_hw_ord=%d\n",
				  attr->max_rd_atomic,
				  dev->hw_attrs.max_hw_ord);
			return -EINVAL;
		}
		if (attr->max_rd_atomic)
			roce_info->ord_size = attr->max_rd_atomic;
		info.ord_valid = true;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
			ibdev_err(&iwdev->ibdev,
				  "dest_rd_atomic = %d, above max_hw_ird=%d\n",
				  attr->max_dest_rd_atomic,
				  dev->hw_attrs.max_hw_ird);
			return -EINVAL;
		}
		if (attr->max_dest_rd_atomic)
			roce_info->ird_size = attr->max_dest_rd_atomic;
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			roce_info->rd_en = true;
	}

	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));

	ibdev_dbg(&iwdev->ibdev,
		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d
irdma_qpstate=%d attr_mask=0x%x\n", __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state, iwqp->iwarp_state, attr_mask); spin_lock_irqsave(&iwqp->lock, flags); if (attr_mask & IB_QP_STATE) { if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state, iwqp->ibqp.qp_type, attr_mask)) { ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n", iwqp->ibqp.qp_num, iwqp->ibqp_state, attr->qp_state); ret = -EINVAL; goto exit; } info.curr_iwarp_state = iwqp->iwarp_state; switch (attr->qp_state) { case IB_QPS_INIT: if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { ret = -EINVAL; goto exit; } if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { info.next_iwarp_state = IRDMA_QP_STATE_IDLE; issue_modify_qp = 1; } break; case IB_QPS_RTR: if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { ret = -EINVAL; goto exit; } info.arp_cache_idx_valid = true; info.cq_num_valid = true; info.next_iwarp_state = IRDMA_QP_STATE_RTR; issue_modify_qp = 1; break; case IB_QPS_RTS: if (iwqp->ibqp_state < IB_QPS_RTR || iwqp->ibqp_state == IB_QPS_ERR) { ret = -EINVAL; goto exit; } info.arp_cache_idx_valid = true; info.cq_num_valid = true; info.ord_valid = true; info.next_iwarp_state = IRDMA_QP_STATE_RTS; issue_modify_qp = 1; if (iwdev->push_mode && udata && iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { spin_unlock_irqrestore(&iwqp->lock, flags); irdma_alloc_push_page(iwqp); spin_lock_irqsave(&iwqp->lock, flags); } break; case IB_QPS_SQD: if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD) goto exit; if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) { ret = -EINVAL; goto exit; } info.next_iwarp_state = IRDMA_QP_STATE_SQD; issue_modify_qp = 1; break; case IB_QPS_SQE: case IB_QPS_ERR: case IB_QPS_RESET: if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) { spin_unlock_irqrestore(&iwqp->lock, flags); info.next_iwarp_state = IRDMA_QP_STATE_SQD; irdma_hw_modify_qp(iwdev, iwqp, &info, true); spin_lock_irqsave(&iwqp->lock, flags); } if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { spin_unlock_irqrestore(&iwqp->lock, flags); if (udata && udata->inlen) { if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen))) return -EINVAL; irdma_flush_wqes(iwqp, (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) | (ureq.rq_flush ? 
IRDMA_FLUSH_RQ : 0) | IRDMA_REFLUSH); } return 0; } info.next_iwarp_state = IRDMA_QP_STATE_ERROR; issue_modify_qp = 1; break; default: ret = -EINVAL; goto exit; } iwqp->ibqp_state = attr->qp_state; } ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); spin_unlock_irqrestore(&iwqp->lock, flags); if (attr_mask & IB_QP_STATE) { if (issue_modify_qp) { ctx_info->rem_endpoint_idx = udp_info->arp_idx; if (irdma_hw_modify_qp(iwdev, iwqp, &info, true)) return -EINVAL; spin_lock_irqsave(&iwqp->lock, flags); if (iwqp->iwarp_state == info.curr_iwarp_state) { iwqp->iwarp_state = info.next_iwarp_state; iwqp->ibqp_state = attr->qp_state; } if (iwqp->ibqp_state > IB_QPS_RTS && !iwqp->flush_issued) { spin_unlock_irqrestore(&iwqp->lock, flags); irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ | IRDMA_FLUSH_WAIT); iwqp->flush_issued = 1; } else { spin_unlock_irqrestore(&iwqp->lock, flags); } } else { iwqp->ibqp_state = attr->qp_state; } if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { struct irdma_ucontext *ucontext; ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && !iwqp->push_wqe_mmap_entry && !irdma_setup_push_mmap_entries(ucontext, iwqp, &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) { uresp.push_valid = 1; uresp.push_offset = iwqp->sc_qp.push_offset; } ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)); if (ret) { irdma_remove_push_mmap_entries(iwqp); ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n"); return ret; } } } return 0; exit: spin_unlock_irqrestore(&iwqp->lock, flags); return ret; } /** * irdma_modify_qp - modify qp request * @ibqp: qp's pointer for modify * @attr: access attributes * @attr_mask: state mask * @udata: user data */ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush) #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid) struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_device *iwdev = iwqp->iwdev; struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; struct irdma_qp_host_ctx_info *ctx_info; struct irdma_tcp_offload_info *tcp_info; struct irdma_iwarp_offload_info *offload_info; struct irdma_modify_qp_info info = {}; struct irdma_modify_qp_resp uresp = {}; struct irdma_modify_qp_req ureq = {}; u8 issue_modify_qp = 0; u8 dont_wait = 0; int err; unsigned long flags; if (udata) { /* udata inlen/outlen can be 0 when supporting legacy libi40iw */ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) || (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN)) return -EINVAL; } if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; ctx_info = &iwqp->ctx_info; offload_info = &iwqp->iwarp_info; tcp_info = &iwqp->tcp_info; wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); ibdev_dbg(&iwdev->ibdev, "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n", __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq, iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask); spin_lock_irqsave(&iwqp->lock, flags); if (attr_mask & IB_QP_STATE) { info.curr_iwarp_state = 
iwqp->iwarp_state; switch (attr->qp_state) { case IB_QPS_INIT: case IB_QPS_RTR: if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { err = -EINVAL; goto exit; } if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { info.next_iwarp_state = IRDMA_QP_STATE_IDLE; issue_modify_qp = 1; } if (iwdev->push_mode && udata && iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { spin_unlock_irqrestore(&iwqp->lock, flags); irdma_alloc_push_page(iwqp); spin_lock_irqsave(&iwqp->lock, flags); } break; case IB_QPS_RTS: if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS || !iwqp->cm_id) { err = -EINVAL; goto exit; } issue_modify_qp = 1; iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED; iwqp->hte_added = 1; info.next_iwarp_state = IRDMA_QP_STATE_RTS; info.tcp_ctx_valid = true; info.ord_valid = true; info.arp_cache_idx_valid = true; info.cq_num_valid = true; break; case IB_QPS_SQD: if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) { err = 0; goto exit; } if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING || iwqp->iwarp_state < IRDMA_QP_STATE_RTS) { err = 0; goto exit; } if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) { err = -EINVAL; goto exit; } info.next_iwarp_state = IRDMA_QP_STATE_CLOSING; issue_modify_qp = 1; break; case IB_QPS_SQE: if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) { err = -EINVAL; goto exit; } info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE; issue_modify_qp = 1; break; case IB_QPS_ERR: case IB_QPS_RESET: if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { spin_unlock_irqrestore(&iwqp->lock, flags); if (udata && udata->inlen) { if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen))) return -EINVAL; irdma_flush_wqes(iwqp, (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) | (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) | IRDMA_REFLUSH); } return 0; } if (iwqp->sc_qp.term_flags) { spin_unlock_irqrestore(&iwqp->lock, flags); irdma_terminate_del_timer(&iwqp->sc_qp); spin_lock_irqsave(&iwqp->lock, flags); } info.next_iwarp_state = IRDMA_QP_STATE_ERROR; if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED && iwdev->iw_status && iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT) info.reset_tcp_conn = true; else dont_wait = 1; issue_modify_qp = 1; info.next_iwarp_state = IRDMA_QP_STATE_ERROR; break; default: err = -EINVAL; goto exit; } iwqp->ibqp_state = attr->qp_state; } if (attr_mask & IB_QP_ACCESS_FLAGS) { ctx_info->iwarp_info_valid = true; if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) offload_info->wr_rdresp_en = true; if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) offload_info->wr_rdresp_en = true; if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) offload_info->rd_en = true; } if (ctx_info->iwarp_info_valid) { ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); } spin_unlock_irqrestore(&iwqp->lock, flags); if (attr_mask & IB_QP_STATE) { if (issue_modify_qp) { ctx_info->rem_endpoint_idx = tcp_info->arp_idx; if (irdma_hw_modify_qp(iwdev, iwqp, &info, true)) return -EINVAL; } spin_lock_irqsave(&iwqp->lock, flags); if (iwqp->iwarp_state == info.curr_iwarp_state) { iwqp->iwarp_state = info.next_iwarp_state; iwqp->ibqp_state = attr->qp_state; } spin_unlock_irqrestore(&iwqp->lock, flags); } if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) { if (dont_wait) { if (iwqp->hw_tcp_state) { spin_lock_irqsave(&iwqp->lock, flags); iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED; iwqp->last_aeq = IRDMA_AE_RESET_SENT; spin_unlock_irqrestore(&iwqp->lock, flags); } 
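			/* no hardware TCP reset is expected on this path
			 * (dont_wait), so drive the disconnect from the CM
			 * directly
			 */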
irdma_cm_disconn(iwqp); } else { int close_timer_started; spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); if (iwqp->cm_node) { refcount_inc(&iwqp->cm_node->refcnt); spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); close_timer_started = atomic_inc_return(&iwqp->close_timer_started); if (iwqp->cm_id && close_timer_started == 1) irdma_schedule_cm_timer(iwqp->cm_node, (struct irdma_puda_buf *)iwqp, IRDMA_TIMER_TYPE_CLOSE, 1, 0); irdma_rem_ref_cm_node(iwqp->cm_node); } else { spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); } } } if (attr_mask & IB_QP_STATE && udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { struct irdma_ucontext *ucontext; ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && !iwqp->push_wqe_mmap_entry && !irdma_setup_push_mmap_entries(ucontext, iwqp, &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) { uresp.push_valid = 1; uresp.push_offset = iwqp->sc_qp.push_offset; } err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)); if (err) { irdma_remove_push_mmap_entries(iwqp); ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n"); return err; } } return 0; exit: spin_unlock_irqrestore(&iwqp->lock, flags); return err; } /** * irdma_cq_free_rsrc - free up resources for cq * @rf: RDMA PCI function * @iwcq: cq ptr */ static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq) { struct irdma_sc_cq *cq = &iwcq->sc_cq; if (!iwcq->user_mode) { dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size, iwcq->kmem.va, iwcq->kmem.pa); iwcq->kmem.va = NULL; dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem_shadow.size, iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa); iwcq->kmem_shadow.va = NULL; } irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id); } /** * irdma_free_cqbuf - worker to free a cq buffer * @work: provides access to the cq buffer to free */ static void irdma_free_cqbuf(struct work_struct *work) { struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work); dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size, cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa); cq_buf->kmem_buf.va = NULL; kfree(cq_buf); } /** * irdma_process_resize_list - remove resized cq buffers from the resize_list * @iwcq: cq which owns the resize_list * @iwdev: irdma device * @lcqe_buf: the buffer where the last cqe is received */ static int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev, struct irdma_cq_buf *lcqe_buf) { struct list_head *tmp_node, *list_node; struct irdma_cq_buf *cq_buf; int cnt = 0; list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { cq_buf = list_entry(list_node, struct irdma_cq_buf, list); if (cq_buf == lcqe_buf) return cnt; list_del(&cq_buf->list); queue_work(iwdev->cleanup_wq, &cq_buf->work); cnt++; } return cnt; } /** * irdma_destroy_cq - destroy cq * @ib_cq: cq pointer * @udata: user data */ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) { struct irdma_device *iwdev = to_iwdev(ib_cq->device); struct irdma_cq *iwcq = to_iwcq(ib_cq); struct irdma_sc_cq *cq = &iwcq->sc_cq; struct irdma_sc_dev *dev = cq->dev; struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id]; struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq); unsigned long flags; spin_lock_irqsave(&iwcq->lock, flags); if (!list_empty(&iwcq->cmpl_generated)) irdma_remove_cmpls_list(iwcq); if (!list_empty(&iwcq->resize_list)) irdma_process_resize_list(iwcq, iwdev, NULL); 
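	/* leftover resize buffers are handed to the cleanup workqueue
	 * (irdma_free_cqbuf) rather than being freed under the CQ lock
	 */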
spin_unlock_irqrestore(&iwcq->lock, flags); irdma_cq_rem_ref(ib_cq); wait_for_completion(&iwcq->free_cq); irdma_cq_wq_destroy(iwdev->rf, cq); spin_lock_irqsave(&iwceq->ce_lock, flags); irdma_sc_cleanup_ceqes(cq, ceq); spin_unlock_irqrestore(&iwceq->ce_lock, flags); irdma_cq_free_rsrc(iwdev->rf, iwcq); return 0; } /** * irdma_resize_cq - resize cq * @ibcq: cq to be resized * @entries: desired cq size * @udata: user data */ static int irdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer) struct irdma_cq *iwcq = to_iwcq(ibcq); struct irdma_sc_dev *dev = iwcq->sc_cq.dev; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_modify_cq_info *m_info; struct irdma_modify_cq_info info = {}; struct irdma_dma_mem kmem_buf; struct irdma_cq_mr *cqmr_buf; struct irdma_pbl *iwpbl_buf; struct irdma_device *iwdev; struct irdma_pci_f *rf; struct irdma_cq_buf *cq_buf = NULL; unsigned long flags; int ret; iwdev = to_iwdev(ibcq->device); rf = iwdev->rf; if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)) return -EOPNOTSUPP; if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN) return -EINVAL; if (entries > rf->max_cqe) return -EINVAL; if (!iwcq->user_mode) { entries++; if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) entries *= 2; } info.cq_size = max(entries, 4); if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1) return 0; if (udata) { struct irdma_resize_cq_req req = {}; struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); /* CQ resize not supported with legacy GEN_1 libi40iw */ if (ucontext->legacy_mode) return -EOPNOTSUPP; if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) return -EINVAL; spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer, &ucontext->cq_reg_mem_list); spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); if (!iwpbl_buf) return -ENOMEM; cqmr_buf = &iwpbl_buf->cq_mr; if (iwpbl_buf->pbl_allocated) { info.virtual_map = true; info.pbl_chunk_size = 1; info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx; } else { info.cq_pa = cqmr_buf->cq_pbl.addr; } } else { /* Kmode CQ resize */ int rsize; rsize = info.cq_size * sizeof(struct irdma_cqe); kmem_buf.size = ALIGN(round_up(rsize, 256), 256); kmem_buf.va = dma_alloc_coherent(dev->hw->device, kmem_buf.size, &kmem_buf.pa, GFP_KERNEL); if (!kmem_buf.va) return -ENOMEM; info.cq_base = kmem_buf.va; info.cq_pa = kmem_buf.pa; cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL); if (!cq_buf) { ret = -ENOMEM; goto error; } } cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) { ret = -ENOMEM; goto error; } info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold; info.cq_resize = true; cqp_info = &cqp_request->info; m_info = &cqp_info->in.u.cq_modify.info; memcpy(m_info, &info, sizeof(*m_info)); cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY; cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq; cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request; cqp_info->post_sq = 1; ret = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); if (ret) goto error; spin_lock_irqsave(&iwcq->lock, flags); if (cq_buf) { cq_buf->kmem_buf = iwcq->kmem; cq_buf->hw = dev->hw; memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk)); INIT_WORK(&cq_buf->work, irdma_free_cqbuf); list_add_tail(&cq_buf->list, 
&iwcq->resize_list); iwcq->kmem = kmem_buf; } irdma_sc_cq_resize(&iwcq->sc_cq, &info); ibcq->cqe = info.cq_size - 1; spin_unlock_irqrestore(&iwcq->lock, flags); return 0; error: if (!udata) { dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va, kmem_buf.pa); kmem_buf.va = NULL; } kfree(cq_buf); return ret; } static inline int cq_validate_flags(u32 flags, u8 hw_rev) { /* GEN1 does not support CQ create flags */ if (hw_rev == IRDMA_GEN_1) return flags ? -EOPNOTSUPP : 0; return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0; } /** * irdma_create_cq - create cq * @ibcq: CQ allocated * @attr: attributes for cq * @udata: user data */ static int irdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf) #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size) struct ib_device *ibdev = ibcq->device; struct irdma_device *iwdev = to_iwdev(ibdev); struct irdma_pci_f *rf = iwdev->rf; struct irdma_cq *iwcq = to_iwcq(ibcq); u32 cq_num = 0; struct irdma_sc_cq *cq; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_cq_init_info info = {}; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; unsigned long flags; int err_code; int entries = attr->cqe; err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); if (err_code) return err_code; if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN || udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN)) return -EINVAL; err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, &rf->next_cq); if (err_code) return err_code; cq = &iwcq->sc_cq; cq->back_cq = iwcq; refcount_set(&iwcq->refcnt, 1); spin_lock_init(&iwcq->lock); INIT_LIST_HEAD(&iwcq->resize_list); INIT_LIST_HEAD(&iwcq->cmpl_generated); info.dev = dev; ukinfo->cq_size = max(entries, 4); ukinfo->cq_id = cq_num; iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; if (attr->comp_vector < rf->ceqs_count) info.ceq_id = attr->comp_vector; info.ceq_id_valid = true; info.ceqe_mask = 1; info.type = IRDMA_CQ_TYPE_IWARP; info.vsi = &iwdev->vsi; if (udata) { struct irdma_ucontext *ucontext; struct irdma_create_cq_req req = {}; struct irdma_cq_mr *cqmr; struct irdma_pbl *iwpbl; struct irdma_pbl *iwpbl_shadow; struct irdma_cq_mr *cqmr_shadow; iwcq->user_mode = true; ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { err_code = -EFAULT; goto cq_free_rsrc; } spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf, &ucontext->cq_reg_mem_list); spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); if (!iwpbl) { err_code = -EPROTO; goto cq_free_rsrc; } iwcq->iwpbl = iwpbl; iwcq->cq_mem_size = 0; cqmr = &iwpbl->cq_mr; if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) { spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); iwpbl_shadow = irdma_get_pbl( (unsigned long)req.user_shadow_area, &ucontext->cq_reg_mem_list); spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); if (!iwpbl_shadow) { err_code = -EPROTO; goto cq_free_rsrc; } iwcq->iwpbl_shadow = iwpbl_shadow; cqmr_shadow = &iwpbl_shadow->cq_mr; info.shadow_area_pa = cqmr_shadow->cq_pbl.addr; cqmr->split = true; } else { info.shadow_area_pa = cqmr->shadow; } if 
(iwpbl->pbl_allocated) { info.virtual_map = true; info.pbl_chunk_size = 1; info.first_pm_pbl_idx = cqmr->cq_pbl.idx; } else { info.cq_base_pa = cqmr->cq_pbl.addr; } } else { /* Kmode allocations */ int rsize; if (entries < 1 || entries > rf->max_cqe) { err_code = -EINVAL; goto cq_free_rsrc; } entries++; if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) entries *= 2; ukinfo->cq_size = entries; rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe); iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256); iwcq->kmem.va = dma_alloc_coherent(dev->hw->device, iwcq->kmem.size, &iwcq->kmem.pa, GFP_KERNEL); if (!iwcq->kmem.va) { err_code = -ENOMEM; goto cq_free_rsrc; } iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3, 64); iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device, iwcq->kmem_shadow.size, &iwcq->kmem_shadow.pa, GFP_KERNEL); if (!iwcq->kmem_shadow.va) { err_code = -ENOMEM; goto cq_free_rsrc; } info.shadow_area_pa = iwcq->kmem_shadow.pa; ukinfo->shadow_area = iwcq->kmem_shadow.va; ukinfo->cq_base = iwcq->kmem.va; info.cq_base_pa = iwcq->kmem.pa; } if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, (u32)IRDMA_MAX_CQ_READ_THRESH); if (irdma_sc_cq_init(cq, &info)) { ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n"); err_code = -EPROTO; goto cq_free_rsrc; } cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) { err_code = -ENOMEM; goto cq_free_rsrc; } cqp_info = &cqp_request->info; cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; cqp_info->post_sq = 1; cqp_info->in.u.cq_create.cq = cq; cqp_info->in.u.cq_create.check_overflow = true; cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; err_code = irdma_handle_cqp_op(rf, cqp_request); irdma_put_cqp_request(&rf->cqp, cqp_request); if (err_code) goto cq_free_rsrc; if (udata) { struct irdma_create_cq_resp resp = {}; resp.cq_id = info.cq_uk_init_info.cq_id; resp.cq_size = info.cq_uk_init_info.cq_size; if (ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen))) { ibdev_dbg(&iwdev->ibdev, "VERBS: copy to user data\n"); err_code = -EPROTO; goto cq_destroy; } } rf->cq_table[cq_num] = iwcq; init_completion(&iwcq->free_cq); return 0; cq_destroy: irdma_cq_wq_destroy(rf, cq); cq_free_rsrc: irdma_cq_free_rsrc(rf, iwcq); return err_code; } /** * irdma_get_mr_access - get hw MR access permissions from IB access flags * @access: IB access flags */ static inline u16 irdma_get_mr_access(int access) { u16 hw_access = 0; hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ? IRDMA_ACCESS_FLAGS_LOCALWRITE : 0; hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ? IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0; hw_access |= (access & IB_ACCESS_REMOTE_READ) ? IRDMA_ACCESS_FLAGS_REMOTEREAD : 0; hw_access |= (access & IB_ACCESS_MW_BIND) ? IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0; hw_access |= (access & IB_ZERO_BASED) ? 
		     IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
	hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;

	return hw_access;
}

/**
 * irdma_free_stag - free stag resource
 * @iwdev: irdma device
 * @stag: stag to free
 */
static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
}

/**
 * irdma_create_stag - create random stag
 * @iwdev: irdma device
 */
static u32 irdma_create_stag(struct irdma_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->rf->mr_stagmask;
	next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
	next_stag_index %= iwdev->rf->max_mr;

	ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
			       iwdev->rf->max_mr, &stag_index,
			       &next_stag_index);
	if (ret)
		return stag;
	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
	stag |= driver_key;
	stag += (u32)consumer_key;

	return stag;
}

/**
 * irdma_next_pbl_addr - Get next pbl address
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
	if (!(*pinfo) || *idx != (*pinfo)->cnt)
		return ++pbl;
	*idx = 0;
	(*pinfo)++;

	return (*pinfo)->addr;
}

/**
 * irdma_copy_user_pgaddrs - copy user page addresses into the pble list
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pbl pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 */
static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
				    enum irdma_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct irdma_pble_info *pinfo;
	struct ib_block_iter biter;
	u32 idx = 0;
	u32 pbl_cnt = 0;

	pinfo = (level == PBLE_LEVEL_1) ?
NULL : palloc->level2.leaf; if (iwmr->type == IRDMA_MEMREG_TYPE_QP) iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl); rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) { *pbl = rdma_block_iter_dma_address(&biter); if (++pbl_cnt == palloc->total_cnt) break; pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx); } } /** * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous * @arr: lvl1 pbl array * @npages: page count * @pg_size: page size * */ static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size) { u32 pg_idx; for (pg_idx = 0; pg_idx < npages; pg_idx++) { if ((*arr + (pg_size * pg_idx)) != arr[pg_idx]) return false; } return true; } /** * irdma_check_mr_contiguous - check if MR is physically contiguous * @palloc: pbl allocation struct * @pg_size: page size */ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc, u32 pg_size) { struct irdma_pble_level2 *lvl2 = &palloc->level2; struct irdma_pble_info *leaf = lvl2->leaf; u64 *arr = NULL; u64 *start_addr = NULL; int i; bool ret; if (palloc->level == PBLE_LEVEL_1) { arr = palloc->level1.addr; ret = irdma_check_mem_contiguous(arr, palloc->total_cnt, pg_size); return ret; } start_addr = leaf->addr; for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { arr = leaf->addr; if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr) return false; ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size); if (!ret) return false; } return true; } /** * irdma_setup_pbles - copy user pg address to pble's * @rf: RDMA PCI function * @iwmr: mr pointer for this memory registration * @lvl: requested pble levels */ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, u8 lvl) { struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; struct irdma_pble_info *pinfo; u64 *pbl; int status; enum irdma_pble_level level = PBLE_LEVEL_1; if (lvl) { status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt, lvl); if (status) return status; iwpbl->pbl_allocated = true; level = palloc->level; pinfo = (level == PBLE_LEVEL_1) ? 
&palloc->level1 : palloc->level2.leaf; pbl = pinfo->addr; } else { pbl = iwmr->pgaddrmem; } irdma_copy_user_pgaddrs(iwmr, pbl, level); if (lvl) iwmr->pgaddrmem[0] = *pbl; return 0; } /** * irdma_handle_q_mem - handle memory for qp and cq * @iwdev: irdma device * @req: information for q memory management * @iwpbl: pble struct * @lvl: pble level mask */ static int irdma_handle_q_mem(struct irdma_device *iwdev, struct irdma_mem_reg_req *req, struct irdma_pbl *iwpbl, u8 lvl) { struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; struct irdma_mr *iwmr = iwpbl->iwmr; struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; struct irdma_cq_mr *cqmr = &iwpbl->cq_mr; struct irdma_hmc_pble *hmc_p; u64 *arr = iwmr->pgaddrmem; u32 pg_size, total; int err = 0; bool ret = true; pg_size = iwmr->page_size; err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); if (err) return err; if (lvl) arr = palloc->level1.addr; switch (iwmr->type) { case IRDMA_MEMREG_TYPE_QP: total = req->sq_pages + req->rq_pages; hmc_p = &qpmr->sq_pbl; qpmr->shadow = (dma_addr_t)arr[total]; if (lvl) { ret = irdma_check_mem_contiguous(arr, req->sq_pages, pg_size); if (ret) ret = irdma_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size); } if (!ret) { hmc_p->idx = palloc->level1.idx; hmc_p = &qpmr->rq_pbl; hmc_p->idx = palloc->level1.idx + req->sq_pages; } else { hmc_p->addr = arr[0]; hmc_p = &qpmr->rq_pbl; hmc_p->addr = arr[req->sq_pages]; } break; case IRDMA_MEMREG_TYPE_CQ: hmc_p = &cqmr->cq_pbl; if (!cqmr->split) cqmr->shadow = (dma_addr_t)arr[req->cq_pages]; if (lvl) ret = irdma_check_mem_contiguous(arr, req->cq_pages, pg_size); if (!ret) hmc_p->idx = palloc->level1.idx; else hmc_p->addr = arr[0]; break; default: ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n"); err = -EINVAL; } if (lvl && ret) { irdma_free_pble(iwdev->rf->pble_rsrc, palloc); iwpbl->pbl_allocated = false; } return err; } /** * irdma_hw_alloc_mw - create the hw memory window * @iwdev: irdma device * @iwmr: pointer to memory window info */ static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr) { struct irdma_mw_alloc_info *info; struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd); struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; info = &cqp_info->in.u.mw_alloc.info; memset(info, 0, sizeof(*info)); if (iwmr->ibmw.type == IB_MW_TYPE_1) info->mw_wide = true; info->page_size = PAGE_SIZE; info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; info->pd_id = iwpd->sc_pd.pd_id; info->remote_access = true; cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC; cqp_info->post_sq = 1; cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev; cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(iwdev->rf, cqp_request); irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); return status; } /** * irdma_alloc_mw - Allocate memory window * @ibmw: Memory Window * @udata: user data pointer */ static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) { struct irdma_device *iwdev = to_iwdev(ibmw->device); struct irdma_mr *iwmr = to_iwmw(ibmw); int err_code; u32 stag; stag = irdma_create_stag(iwdev); if (!stag) return -ENOMEM; iwmr->stag = stag; ibmw->rkey = stag; err_code = irdma_hw_alloc_mw(iwdev, iwmr); if (err_code) { irdma_free_stag(iwdev, stag); return err_code; } return 0; } /** * irdma_dealloc_mw - Dealloc memory window * @ibmw: memory window structure. 
*/ static int irdma_dealloc_mw(struct ib_mw *ibmw) { struct ib_pd *ibpd = ibmw->pd; struct irdma_pd *iwpd = to_iwpd(ibpd); struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw); struct irdma_device *iwdev = to_iwdev(ibmw->device); struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_dealloc_stag_info *info; cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; info = &cqp_info->in.u.dealloc_stag.info; memset(info, 0, sizeof(*info)); info->pd_id = iwpd->sc_pd.pd_id; info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S; info->mr = false; cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; cqp_info->post_sq = 1; cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; irdma_handle_cqp_op(iwdev->rf, cqp_request); irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); irdma_free_stag(iwdev, iwmr->stag); return 0; } /** * irdma_hw_alloc_stag - cqp command to allocate stag * @iwdev: irdma device * @iwmr: irdma mr pointer */ static int irdma_hw_alloc_stag(struct irdma_device *iwdev, struct irdma_mr *iwmr) { struct irdma_allocate_stag_info *info; struct ib_pd *pd = iwmr->ibmr.pd; struct irdma_pd *iwpd = to_iwpd(pd); int status; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; info = &cqp_info->in.u.alloc_stag.info; memset(info, 0, sizeof(*info)); info->page_size = PAGE_SIZE; info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; info->pd_id = iwpd->sc_pd.pd_id; info->total_len = iwmr->len; info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; info->remote_access = true; cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG; cqp_info->post_sq = 1; cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev; cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(iwdev->rf, cqp_request); irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); return status; } /** * irdma_alloc_mr - register stag for fast memory registration * @pd: ibpd pointer * @mr_type: memory for stag registrion * @max_num_sg: man number of pages */ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) { struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_pble_alloc *palloc; struct irdma_pbl *iwpbl; struct irdma_mr *iwmr; u32 stag; int err_code; iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); if (!iwmr) return ERR_PTR(-ENOMEM); stag = irdma_create_stag(iwdev); if (!stag) { err_code = -ENOMEM; goto err; } iwmr->stag = stag; iwmr->ibmr.rkey = stag; iwmr->ibmr.lkey = stag; iwmr->ibmr.pd = pd; iwmr->ibmr.device = pd->device; iwpbl = &iwmr->iwpbl; iwpbl->iwmr = iwmr; iwmr->type = IRDMA_MEMREG_TYPE_MEM; palloc = &iwpbl->pble_alloc; iwmr->page_cnt = max_num_sg; /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */ iwmr->len = max_num_sg * PAGE_SIZE; err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt, false); if (err_code) goto err_get_pble; err_code = irdma_hw_alloc_stag(iwdev, iwmr); if (err_code) goto err_alloc_stag; iwpbl->pbl_allocated = true; return &iwmr->ibmr; err_alloc_stag: irdma_free_pble(iwdev->rf->pble_rsrc, palloc); err_get_pble: irdma_free_stag(iwdev, stag); err: kfree(iwmr); return ERR_PTR(err_code); } /** * irdma_set_page - populate pbl list for fmr * @ibmr: ib mem to access iwarp mr pointer * @addr: page dma 
address fro pbl list */ static int irdma_set_page(struct ib_mr *ibmr, u64 addr) { struct irdma_mr *iwmr = to_iwmr(ibmr); struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; u64 *pbl; if (unlikely(iwmr->npages == iwmr->page_cnt)) return -ENOMEM; if (palloc->level == PBLE_LEVEL_2) { struct irdma_pble_info *palloc_info = palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT); palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr; } else { pbl = palloc->level1.addr; pbl[iwmr->npages] = addr; } iwmr->npages++; return 0; } /** * irdma_map_mr_sg - map of sg list for fmr * @ibmr: ib mem to access iwarp mr pointer * @sg: scatter gather list * @sg_nents: number of sg pages * @sg_offset: scatter gather list for fmr */ static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct irdma_mr *iwmr = to_iwmr(ibmr); iwmr->npages = 0; return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page); } /** * irdma_hwreg_mr - send cqp command for memory registration * @iwdev: irdma device * @iwmr: irdma mr pointer * @access: access for MR */ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr, u16 access) { struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_reg_ns_stag_info *stag_info; struct ib_pd *pd = iwmr->ibmr.pd; struct irdma_pd *iwpd = to_iwpd(pd); struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int ret; cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; stag_info = &cqp_info->in.u.mr_reg_non_shared.info; memset(stag_info, 0, sizeof(*stag_info)); stag_info->va = iwpbl->user_base; stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; stag_info->stag_key = (u8)iwmr->stag; stag_info->total_len = iwmr->len; stag_info->access_rights = irdma_get_mr_access(access); stag_info->pd_id = iwpd->sc_pd.pd_id; stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED) stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED; else stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED; stag_info->page_size = iwmr->page_size; if (iwpbl->pbl_allocated) { if (palloc->level == PBLE_LEVEL_1) { stag_info->first_pm_pbl_index = palloc->level1.idx; stag_info->chunk_size = 1; } else { stag_info->first_pm_pbl_index = palloc->level2.root.idx; stag_info->chunk_size = 3; } } else { stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; } cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED; cqp_info->post_sq = 1; cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev; cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; ret = irdma_handle_cqp_op(iwdev->rf, cqp_request); irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); return ret; } static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access) { struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); struct irdma_pbl *iwpbl = &iwmr->iwpbl; u32 stag; u8 lvl; int err; lvl = iwmr->page_cnt != 1 ? 
PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0; err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); if (err) return err; if (lvl) { err = irdma_check_mr_contiguous(&iwpbl->pble_alloc, iwmr->page_size); if (err) { irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); iwpbl->pbl_allocated = false; } } stag = irdma_create_stag(iwdev); if (!stag) { err = -ENOMEM; goto free_pble; } iwmr->stag = stag; iwmr->ibmr.rkey = stag; iwmr->ibmr.lkey = stag; err = irdma_hwreg_mr(iwdev, iwmr, access); if (err) goto err_hwreg; return 0; err_hwreg: irdma_free_stag(iwdev, stag); free_pble: if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); return err; } static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region, struct ib_pd *pd, u64 virt, enum irdma_memreg_type reg_type) { struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_pbl *iwpbl; struct irdma_mr *iwmr; unsigned long pgsz_bitmap; iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); if (!iwmr) return ERR_PTR(-ENOMEM); iwpbl = &iwmr->iwpbl; iwpbl->iwmr = iwmr; iwmr->region = region; iwmr->ibmr.pd = pd; iwmr->ibmr.device = pd->device; iwmr->ibmr.iova = virt; iwmr->type = reg_type; pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ? iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE; iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt); if (unlikely(!iwmr->page_size)) { kfree(iwmr); return ERR_PTR(-EOPNOTSUPP); } iwmr->len = region->length; iwpbl->user_base = virt; iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); return iwmr; } static void irdma_free_iwmr(struct irdma_mr *iwmr) { kfree(iwmr); } static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req, struct ib_udata *udata, struct irdma_mr *iwmr) { struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_ucontext *ucontext = NULL; unsigned long flags; u32 total; int err; u8 lvl; total = req.sq_pages + req.rq_pages + 1; if (total > iwmr->page_cnt) return -EINVAL; total = req.sq_pages + req.rq_pages; lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0; err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); if (err) return err; ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); iwpbl->on_list = true; spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); return 0; } static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req, struct ib_udata *udata, struct irdma_mr *iwmr) { struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_ucontext *ucontext = NULL; u8 shadow_pgcnt = 1; unsigned long flags; u32 total; int err; u8 lvl; if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE) shadow_pgcnt = 0; total = req.cq_pages + shadow_pgcnt; if (total > iwmr->page_cnt) return -EINVAL; lvl = req.cq_pages > 1 ? 
PBLE_LEVEL_1 : PBLE_LEVEL_0; err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); if (err) return err; ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); iwpbl->on_list = true; spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); return 0; } /** * irdma_reg_user_mr - Register a user memory region * @pd: ptr of pd * @start: virtual start address * @len: length of mr * @virt: virtual address * @access: access of mr * @udata: user data */ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, u64 virt, int access, struct ib_udata *udata) { #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages) struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_mem_reg_req req = {}; struct ib_umem *region = NULL; struct irdma_mr *iwmr = NULL; int err; if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) return ERR_PTR(-EINVAL); if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN) return ERR_PTR(-EINVAL); region = ib_umem_get(pd->device, start, len, access); if (IS_ERR(region)) { ibdev_dbg(&iwdev->ibdev, "VERBS: Failed to create ib_umem region\n"); return (struct ib_mr *)region; } if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { ib_umem_release(region); return ERR_PTR(-EFAULT); } iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type); if (IS_ERR(iwmr)) { ib_umem_release(region); return (struct ib_mr *)iwmr; } switch (req.reg_type) { case IRDMA_MEMREG_TYPE_QP: err = irdma_reg_user_mr_type_qp(req, udata, iwmr); if (err) goto error; break; case IRDMA_MEMREG_TYPE_CQ: err = irdma_reg_user_mr_type_cq(req, udata, iwmr); if (err) goto error; break; case IRDMA_MEMREG_TYPE_MEM: err = irdma_reg_user_mr_type_mem(iwmr, access); if (err) goto error; break; default: err = -EINVAL; goto error; } return &iwmr->ibmr; error: ib_umem_release(region); irdma_free_iwmr(iwmr); return ERR_PTR(err); } static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start, u64 len, u64 virt, int fd, int access, struct ib_udata *udata) { struct irdma_device *iwdev = to_iwdev(pd->device); struct ib_umem_dmabuf *umem_dmabuf; struct irdma_mr *iwmr; int err; if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) return ERR_PTR(-EINVAL); umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access); if (IS_ERR(umem_dmabuf)) { err = PTR_ERR(umem_dmabuf); ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err); return ERR_PTR(err); } iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM); if (IS_ERR(iwmr)) { err = PTR_ERR(iwmr); goto err_release; } err = irdma_reg_user_mr_type_mem(iwmr, access); if (err) goto err_iwmr; return &iwmr->ibmr; err_iwmr: irdma_free_iwmr(iwmr); err_release: ib_umem_release(&umem_dmabuf->umem); return ERR_PTR(err); } /** * irdma_reg_phys_mr - register kernel physical memory * @pd: ibpd pointer * @addr: physical address of memory to register * @size: size of memory to register * @access: Access rights * @iova_start: start of virtual address for physical buffers */ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access, u64 *iova_start) { struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_pbl *iwpbl; struct irdma_mr *iwmr; u32 stag; int ret; iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); if (!iwmr) return ERR_PTR(-ENOMEM); iwmr->ibmr.pd = pd; iwmr->ibmr.device = pd->device; iwpbl = &iwmr->iwpbl; iwpbl->iwmr = iwmr; 
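/* Kernel physical-buffer MRs map a single contiguous region, so no PBLE chunk is allocated: the address is kept in pgaddrmem[0] and registered below with a fixed 4K page size. */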
iwmr->type = IRDMA_MEMREG_TYPE_MEM; iwpbl->user_base = *iova_start; stag = irdma_create_stag(iwdev); if (!stag) { ret = -ENOMEM; goto err; } iwmr->stag = stag; iwmr->ibmr.iova = *iova_start; iwmr->ibmr.rkey = stag; iwmr->ibmr.lkey = stag; iwmr->page_cnt = 1; iwmr->pgaddrmem[0] = addr; iwmr->len = size; iwmr->page_size = SZ_4K; ret = irdma_hwreg_mr(iwdev, iwmr, access); if (ret) { irdma_free_stag(iwdev, stag); goto err; } return &iwmr->ibmr; err: kfree(iwmr); return ERR_PTR(ret); } /** * irdma_get_dma_mr - register physical mem * @pd: ptr of pd * @acc: access for memory */ static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc) { u64 kva = 0; return irdma_reg_phys_mr(pd, 0, 0, acc, &kva); } /** * irdma_del_memlist - Deleting pbl list entries for CQ/QP * @iwmr: iwmr for IB's user page addresses * @ucontext: ptr to user context */ static void irdma_del_memlist(struct irdma_mr *iwmr, struct irdma_ucontext *ucontext) { struct irdma_pbl *iwpbl = &iwmr->iwpbl; unsigned long flags; switch (iwmr->type) { case IRDMA_MEMREG_TYPE_CQ: spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); if (iwpbl->on_list) { iwpbl->on_list = false; list_del(&iwpbl->list); } spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); break; case IRDMA_MEMREG_TYPE_QP: spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); if (iwpbl->on_list) { iwpbl->on_list = false; list_del(&iwpbl->list); } spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); break; default: break; } } /** * irdma_dereg_mr - deregister mr * @ib_mr: mr ptr for dereg * @udata: user data */ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) { struct ib_pd *ibpd = ib_mr->pd; struct irdma_pd *iwpd = to_iwpd(ibpd); struct irdma_mr *iwmr = to_iwmr(ib_mr); struct irdma_device *iwdev = to_iwdev(ib_mr->device); struct irdma_dealloc_stag_info *info; struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status; if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) { if (iwmr->region) { struct irdma_ucontext *ucontext; ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); irdma_del_memlist(iwmr, ucontext); } goto done; } cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_info = &cqp_request->info; info = &cqp_info->in.u.dealloc_stag.info; memset(info, 0, sizeof(*info)); info->pd_id = iwpd->sc_pd.pd_id; info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S; info->mr = true; if (iwpbl->pbl_allocated) info->dealloc_pbl = true; cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; cqp_info->post_sq = 1; cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(iwdev->rf, cqp_request); irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); if (status) return status; irdma_free_stag(iwdev, iwmr->stag); done: if (iwpbl->pbl_allocated) irdma_free_pble(iwdev->rf->pble_rsrc, palloc); ib_umem_release(iwmr->region); kfree(iwmr); return 0; } /** * irdma_post_send - kernel application wr * @ibqp: qp ptr for wr * @ib_wr: work request ptr * @bad_wr: return of bad wr if err */ static int irdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *ib_wr, const struct ib_send_wr **bad_wr) { struct irdma_qp *iwqp; struct irdma_qp_uk *ukqp; struct irdma_sc_dev *dev; struct irdma_post_sq_info info; int err = 0; unsigned long flags; bool inv_stag; struct irdma_ah *ah; iwqp = 
to_iwqp(ibqp); ukqp = &iwqp->sc_qp.qp_uk; dev = &iwqp->iwdev->rf->sc_dev; spin_lock_irqsave(&iwqp->lock, flags); while (ib_wr) { memset(&info, 0, sizeof(info)); inv_stag = false; info.wr_id = (ib_wr->wr_id); if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) info.signaled = true; if (ib_wr->send_flags & IB_SEND_FENCE) info.read_fence = true; switch (ib_wr->opcode) { case IB_WR_SEND_WITH_IMM: if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) { info.imm_data_valid = true; info.imm_data = ntohl(ib_wr->ex.imm_data); } else { err = -EINVAL; break; } fallthrough; case IB_WR_SEND: case IB_WR_SEND_WITH_INV: if (ib_wr->opcode == IB_WR_SEND || ib_wr->opcode == IB_WR_SEND_WITH_IMM) { if (ib_wr->send_flags & IB_SEND_SOLICITED) info.op_type = IRDMA_OP_TYPE_SEND_SOL; else info.op_type = IRDMA_OP_TYPE_SEND; } else { if (ib_wr->send_flags & IB_SEND_SOLICITED) info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV; else info.op_type = IRDMA_OP_TYPE_SEND_INV; info.stag_to_inv = ib_wr->ex.invalidate_rkey; } info.op.send.num_sges = ib_wr->num_sge; info.op.send.sg_list = ib_wr->sg_list; if (iwqp->ibqp.qp_type == IB_QPT_UD || iwqp->ibqp.qp_type == IB_QPT_GSI) { ah = to_iwah(ud_wr(ib_wr)->ah); info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx; info.op.send.qkey = ud_wr(ib_wr)->remote_qkey; info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn; } if (ib_wr->send_flags & IB_SEND_INLINE) err = irdma_uk_inline_send(ukqp, &info, false); else err = irdma_uk_send(ukqp, &info, false); break; case IB_WR_RDMA_WRITE_WITH_IMM: if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) { info.imm_data_valid = true; info.imm_data = ntohl(ib_wr->ex.imm_data); } else { err = -EINVAL; break; } fallthrough; case IB_WR_RDMA_WRITE: if (ib_wr->send_flags & IB_SEND_SOLICITED) info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL; else info.op_type = IRDMA_OP_TYPE_RDMA_WRITE; info.op.rdma_write.num_lo_sges = ib_wr->num_sge; info.op.rdma_write.lo_sg_list = ib_wr->sg_list; info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr; info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey; if (ib_wr->send_flags & IB_SEND_INLINE) err = irdma_uk_inline_rdma_write(ukqp, &info, false); else err = irdma_uk_rdma_write(ukqp, &info, false); break; case IB_WR_RDMA_READ_WITH_INV: inv_stag = true; fallthrough; case IB_WR_RDMA_READ: if (ib_wr->num_sge > dev->hw_attrs.uk_attrs.max_hw_read_sges) { err = -EINVAL; break; } info.op_type = IRDMA_OP_TYPE_RDMA_READ; info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr; info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey; info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list; info.op.rdma_read.num_lo_sges = ib_wr->num_sge; err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false); break; case IB_WR_LOCAL_INV: info.op_type = IRDMA_OP_TYPE_INV_STAG; info.local_fence = info.read_fence; info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; err = irdma_uk_stag_local_invalidate(ukqp, &info, true); break; case IB_WR_REG_MR: { struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr); struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc; struct irdma_fast_reg_stag_info stag_info = {}; stag_info.signaled = info.signaled; stag_info.read_fence = info.read_fence; stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access); stag_info.stag_key = reg_wr(ib_wr)->key & 0xff; stag_info.stag_idx = reg_wr(ib_wr)->key >> 8; stag_info.page_size = reg_wr(ib_wr)->mr->page_size; stag_info.wr_id = ib_wr->wr_id; stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED; stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova; stag_info.total_len = iwmr->ibmr.length; 
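/* Program the HW with the level-1 PBLE backing this fast-reg MR (its first physical address and PBLE index); chunk_size selects PBLE-list mode only when more than IRDMA_MIN_PAGES_PER_FMR pages are mapped. */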
stag_info.reg_addr_pa = *palloc->level1.addr; stag_info.first_pm_pbl_index = palloc->level1.idx; stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR) stag_info.chunk_size = 1; err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info, true); break; } default: err = -EINVAL; ibdev_dbg(&iwqp->iwdev->ibdev, "VERBS: upost_send bad opcode = 0x%x\n", ib_wr->opcode); break; } if (err) break; ib_wr = ib_wr->next; } if (!iwqp->flush_issued) { if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) irdma_uk_qp_post_wr(ukqp); spin_unlock_irqrestore(&iwqp->lock, flags); } else { spin_unlock_irqrestore(&iwqp->lock, flags); mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); } if (err) *bad_wr = ib_wr; return err; } /** * irdma_post_recv - post receive wr for kernel application * @ibqp: ib qp pointer * @ib_wr: work request for receive * @bad_wr: bad wr caused an error */ static int irdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr, const struct ib_recv_wr **bad_wr) { struct irdma_qp *iwqp; struct irdma_qp_uk *ukqp; struct irdma_post_rq_info post_recv = {}; unsigned long flags; int err = 0; iwqp = to_iwqp(ibqp); ukqp = &iwqp->sc_qp.qp_uk; spin_lock_irqsave(&iwqp->lock, flags); while (ib_wr) { post_recv.num_sges = ib_wr->num_sge; post_recv.wr_id = ib_wr->wr_id; post_recv.sg_list = ib_wr->sg_list; err = irdma_uk_post_receive(ukqp, &post_recv); if (err) { ibdev_dbg(&iwqp->iwdev->ibdev, "VERBS: post_recv err %d\n", err); goto out; } ib_wr = ib_wr->next; } out: spin_unlock_irqrestore(&iwqp->lock, flags); if (iwqp->flush_issued) mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); if (err) *bad_wr = ib_wr; return err; } /** * irdma_flush_err_to_ib_wc_status - return change flush error code to IB status * @opcode: iwarp flush code */ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode) { switch (opcode) { case FLUSH_PROT_ERR: return IB_WC_LOC_PROT_ERR; case FLUSH_REM_ACCESS_ERR: return IB_WC_REM_ACCESS_ERR; case FLUSH_LOC_QP_OP_ERR: return IB_WC_LOC_QP_OP_ERR; case FLUSH_REM_OP_ERR: return IB_WC_REM_OP_ERR; case FLUSH_LOC_LEN_ERR: return IB_WC_LOC_LEN_ERR; case FLUSH_GENERAL_ERR: return IB_WC_WR_FLUSH_ERR; case FLUSH_RETRY_EXC_ERR: return IB_WC_RETRY_EXC_ERR; case FLUSH_MW_BIND_ERR: return IB_WC_MW_BIND_ERR; case FLUSH_REM_INV_REQ_ERR: return IB_WC_REM_INV_REQ_ERR; case FLUSH_FATAL_ERR: default: return IB_WC_FATAL_ERR; } } /** * irdma_process_cqe - process cqe info * @entry: processed cqe * @cq_poll_info: cqe info */ static void irdma_process_cqe(struct ib_wc *entry, struct irdma_cq_poll_info *cq_poll_info) { struct irdma_sc_qp *qp; entry->wc_flags = 0; entry->pkey_index = 0; entry->wr_id = cq_poll_info->wr_id; qp = cq_poll_info->qp_handle; entry->qp = qp->qp_uk.back_qp; if (cq_poll_info->error) { entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ? 
irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR; entry->vendor_err = cq_poll_info->major_err << 16 | cq_poll_info->minor_err; } else { entry->status = IB_WC_SUCCESS; if (cq_poll_info->imm_valid) { entry->ex.imm_data = htonl(cq_poll_info->imm_data); entry->wc_flags |= IB_WC_WITH_IMM; } if (cq_poll_info->ud_smac_valid) { ether_addr_copy(entry->smac, cq_poll_info->ud_smac); entry->wc_flags |= IB_WC_WITH_SMAC; } if (cq_poll_info->ud_vlan_valid) { u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK; entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT; if (vlan) { entry->vlan_id = vlan; entry->wc_flags |= IB_WC_WITH_VLAN; } } else { entry->sl = 0; } } if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) { set_ib_wc_op_sq(cq_poll_info, entry); } else { set_ib_wc_op_rq(cq_poll_info, entry, qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM); if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD && cq_poll_info->stag_invalid_set) { entry->ex.invalidate_rkey = cq_poll_info->inv_stag; entry->wc_flags |= IB_WC_WITH_INVALIDATE; } } if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) { entry->src_qp = cq_poll_info->ud_src_qpn; entry->slid = 0; entry->wc_flags |= (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE); entry->network_hdr_type = cq_poll_info->ipv4 ? RDMA_NETWORK_IPV4 : RDMA_NETWORK_IPV6; } else { entry->src_qp = cq_poll_info->qp_id; } entry->byte_len = cq_poll_info->bytes_xfered; } /** * irdma_poll_one - poll one entry of the CQ * @ukcq: ukcq to poll * @cur_cqe: current CQE info to be filled in * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ * * Returns the internal irdma device error code or 0 on success */ static inline int irdma_poll_one(struct irdma_cq_uk *ukcq, struct irdma_cq_poll_info *cur_cqe, struct ib_wc *entry) { int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe); if (ret) return ret; irdma_process_cqe(entry, cur_cqe); return 0; } /** * __irdma_poll_cq - poll cq for completion (kernel apps) * @iwcq: cq to poll * @num_entries: number of entries to poll * @entry: wr of a completed entry */ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry) { struct list_head *tmp_node, *list_node; struct irdma_cq_buf *last_buf = NULL; struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe; struct irdma_cq_buf *cq_buf; int ret; struct irdma_device *iwdev; struct irdma_cq_uk *ukcq; bool cq_new_cqe = false; int resized_bufs = 0; int npolled = 0; iwdev = to_iwdev(iwcq->ibcq.device); ukcq = &iwcq->sc_cq.cq_uk; /* go through the list of previously resized CQ buffers */ list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { cq_buf = container_of(list_node, struct irdma_cq_buf, list); while (npolled < num_entries) { ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled); if (!ret) { ++npolled; cq_new_cqe = true; continue; } if (ret == -ENOENT) break; /* QP using the CQ is destroyed. Skip reporting this CQE */ if (ret == -EFAULT) { cq_new_cqe = true; continue; } goto error; } /* save the resized CQ buffer which received the last cqe */ if (cq_new_cqe) last_buf = cq_buf; cq_new_cqe = false; } /* check the current CQ for new cqes */ while (npolled < num_entries) { ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled); if (ret == -ENOENT) { ret = irdma_generated_cmpls(iwcq, cur_cqe); if (!ret) irdma_process_cqe(entry + npolled, cur_cqe); } if (!ret) { ++npolled; cq_new_cqe = true; continue; } if (ret == -ENOENT) break; /* QP using the CQ is destroyed. 
Skip reporting this CQE */ if (ret == -EFAULT) { cq_new_cqe = true; continue; } goto error; } if (cq_new_cqe) /* all previous CQ resizes are complete */ resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL); else if (last_buf) /* only CQ resizes up to the last_buf are complete */ resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf); if (resized_bufs) /* report to the HW the number of complete CQ resizes */ irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs); return npolled; error: ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n", __func__, ret); return ret; } /** * irdma_poll_cq - poll cq for completion (kernel apps) * @ibcq: cq to poll * @num_entries: number of entries to poll * @entry: wr of a completed entry */ static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) { struct irdma_cq *iwcq; unsigned long flags; int ret; iwcq = to_iwcq(ibcq); spin_lock_irqsave(&iwcq->lock, flags); ret = __irdma_poll_cq(iwcq, num_entries, entry); spin_unlock_irqrestore(&iwcq->lock, flags); return ret; } /** * irdma_req_notify_cq - arm cq kernel application * @ibcq: cq to arm * @notify_flags: notofication flags */ static int irdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) { struct irdma_cq *iwcq; struct irdma_cq_uk *ukcq; unsigned long flags; enum irdma_cmpl_notify cq_notify; bool promo_event = false; int ret = 0; cq_notify = notify_flags == IB_CQ_SOLICITED ? IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT; iwcq = to_iwcq(ibcq); ukcq = &iwcq->sc_cq.cq_uk; spin_lock_irqsave(&iwcq->lock, flags); /* Only promote to arm the CQ for any event if the last arm event was solicited. */ if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED) promo_event = true; if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) { iwcq->last_notify = cq_notify; irdma_uk_cq_request_notification(ukcq, cq_notify); } if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated))) ret = 1; spin_unlock_irqrestore(&iwcq->lock, flags); return ret; } static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; int err; immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; err = ib_query_port(ibdev, port_num, &attr); if (err) return err; immutable->max_mad_size = IB_MGMT_MAD_SIZE; immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; return 0; } static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; int err; immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; err = ib_query_port(ibdev, port_num, &attr); if (err) return err; immutable->gid_tbl_len = attr.gid_tbl_len; return 0; } static const struct rdma_stat_desc irdma_hw_stat_names[] = { /* gen1 - 32-bit */ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards", [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts", [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes", [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards", [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts", [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes", [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs", [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors", [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors", [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors", /* gen1 - 64-bit */ 
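/* Index layout mirrors struct irdma_dev_hw_stats, so irdma_get_hw_stats() can copy the whole counter block with a single memcpy. */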
[IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets", [IRDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts", [IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name = "ip4InReasmRqd", [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts", [IRDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets", [IRDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts", [IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name = "ip4OutSegRqd", [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts", [IRDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets", [IRDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts", [IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name = "ip6InReasmRqd", [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts", [IRDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets", [IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts", [IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd", [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts", [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs", [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs", [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads", [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends", [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites", [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads", [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends", [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites", [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd", [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv", /* gen2 - 32-bit */ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled", [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored", [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent", /* gen2 - 64-bit */ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets", [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets", [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets", [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets", [IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP", [IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP", [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd", }; static void irdma_get_dev_fw_str(struct ib_device *dev, char *str) { struct irdma_device *iwdev = to_iwdev(dev); snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", irdma_fw_major_ver(&iwdev->rf->sc_dev), irdma_fw_minor_ver(&iwdev->rf->sc_dev)); } /** * irdma_alloc_hw_port_stats - Allocate a hw stats structure * @ibdev: device pointer from stack * @port_num: port number */ static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { struct irdma_device *iwdev = to_iwdev(ibdev); struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; int num_counters = dev->hw_attrs.max_stat_idx; unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters, lifespan); } /** * irdma_get_hw_stats - Populates the rdma_hw_stats structure * @ibdev: device pointer from stack * @stats: stats pointer from stack * @port_num: port number * @index: which hw counter the stack is requesting we update */ static int irdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port_num, int index) { struct irdma_device *iwdev = to_iwdev(ibdev); struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats; if (iwdev->rf->rdma_ver >= IRDMA_GEN_2) irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true); else irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat); memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters); return stats->num_counters; } /** * 
irdma_query_gid - Query port GID * @ibdev: device pointer from stack * @port: port number * @index: Entry index * @gid: Global ID */ static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid) { struct irdma_device *iwdev = to_iwdev(ibdev); memset(gid->raw, 0, sizeof(gid->raw)); ether_addr_copy(gid->raw, iwdev->netdev->dev_addr); return 0; } /** * mcast_list_add - Add a new mcast item to list * @rf: RDMA PCI function * @new_elem: pointer to element to add */ static void mcast_list_add(struct irdma_pci_f *rf, struct mc_table_list *new_elem) { list_add(&new_elem->list, &rf->mc_qht_list.list); } /** * mcast_list_del - Remove an mcast item from list * @mc_qht_elem: pointer to mcast table list element */ static void mcast_list_del(struct mc_table_list *mc_qht_elem) { if (mc_qht_elem) list_del(&mc_qht_elem->list); } /** * mcast_list_lookup_ip - Search mcast list for address * @rf: RDMA PCI function * @ip_mcast: pointer to mcast IP address */ static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf, u32 *ip_mcast) { struct mc_table_list *mc_qht_el; struct list_head *pos, *q; list_for_each_safe (pos, q, &rf->mc_qht_list.list) { mc_qht_el = list_entry(pos, struct mc_table_list, list); if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast, sizeof(mc_qht_el->mc_info.dest_ip))) return mc_qht_el; } return NULL; } /** * irdma_mcast_cqp_op - perform a mcast cqp operation * @iwdev: irdma device * @mc_grp_ctx: mcast group info * @op: operation * * returns error status */ static int irdma_mcast_cqp_op(struct irdma_device *iwdev, struct irdma_mcast_grp_info *mc_grp_ctx, u8 op) { struct cqp_cmds_info *cqp_info; struct irdma_cqp_request *cqp_request; int status; cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); if (!cqp_request) return -ENOMEM; cqp_request->info.in.u.mc_create.info = *mc_grp_ctx; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = op; cqp_info->post_sq = 1; cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request; cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp; status = irdma_handle_cqp_op(iwdev->rf, cqp_request); irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); return status; } /** * irdma_mcast_mac - Get the multicast MAC for an IP address * @ip_addr: IPv4 or IPv6 address * @mac: pointer to result MAC address * @ipv4: flag indicating IPv4 or IPv6 * */ void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4) { u8 *ip = (u8 *)ip_addr; if (ipv4) { unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00, 0x00, 0x00}; mac4[3] = ip[2] & 0x7F; mac4[4] = ip[1]; mac4[5] = ip[0]; ether_addr_copy(mac, mac4); } else { unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00, 0x00, 0x00}; mac6[2] = ip[3]; mac6[3] = ip[2]; mac6[4] = ip[1]; mac6[5] = ip[0]; ether_addr_copy(mac, mac6); } } /** * irdma_attach_mcast - attach a qp to a multicast group * @ibqp: ptr to qp * @ibgid: pointer to global ID * @lid: local ID * * returns error status */ static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) { struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_device *iwdev = iwqp->iwdev; struct irdma_pci_f *rf = iwdev->rf; struct mc_table_list *mc_qht_elem; struct irdma_mcast_grp_ctx_entry_info mcg_info = {}; unsigned long flags; u32 ip_addr[4] = {}; u32 mgn; u32 no_mgs; int ret = 0; bool ipv4; u16 vlan_id; union irdma_sockaddr sgid_addr; unsigned char dmac[ETH_ALEN]; rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) { irdma_copy_ip_ntohl(ip_addr, 
sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL); ipv4 = false; ibdev_dbg(&iwdev->ibdev, "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num, ip_addr); irdma_mcast_mac(ip_addr, dmac, false); } else { ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); ipv4 = true; vlan_id = irdma_get_vlan_ipv4(ip_addr); irdma_mcast_mac(ip_addr, dmac, true); ibdev_dbg(&iwdev->ibdev, "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n", ibqp->qp_num, ip_addr, dmac); } spin_lock_irqsave(&rf->qh_list_lock, flags); mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr); if (!mc_qht_elem) { struct irdma_dma_mem *dma_mem_mc; spin_unlock_irqrestore(&rf->qh_list_lock, flags); mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL); if (!mc_qht_elem) return -ENOMEM; mc_qht_elem->mc_info.ipv4_valid = ipv4; memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr, sizeof(mc_qht_elem->mc_info.dest_ip)); ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg, &mgn, &rf->next_mcg); if (ret) { kfree(mc_qht_elem); return -ENOMEM; } mc_qht_elem->mc_info.mgn = mgn; dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc; dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX, IRDMA_HW_PAGE_SIZE); dma_mem_mc->va = dma_alloc_coherent(rf->hw.device, dma_mem_mc->size, &dma_mem_mc->pa, GFP_KERNEL); if (!dma_mem_mc->va) { irdma_free_rsrc(rf, rf->allocated_mcgs, mgn); kfree(mc_qht_elem); return -ENOMEM; } mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn; memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr, sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr)); mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4; mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id; if (vlan_id < VLAN_N_VID) mc_qht_elem->mc_grp_ctx.vlan_valid = true; mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id; mc_qht_elem->mc_grp_ctx.qs_handle = iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle; ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac); spin_lock_irqsave(&rf->qh_list_lock, flags); mcast_list_add(rf, mc_qht_elem); } else { if (mc_qht_elem->mc_grp_ctx.no_of_mgs == IRDMA_MAX_MGS_PER_CTX) { spin_unlock_irqrestore(&rf->qh_list_lock, flags); return -ENOMEM; } } mcg_info.qp_id = iwqp->ibqp.qp_num; no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs; irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); spin_unlock_irqrestore(&rf->qh_list_lock, flags); /* Only if there is a change do we need to modify or create */ if (!no_mgs) { ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, IRDMA_OP_MC_CREATE); } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) { ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, IRDMA_OP_MC_MODIFY); } else { return 0; } if (ret) goto error; return 0; error: irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { mcast_list_del(mc_qht_elem); dma_free_coherent(rf->hw.device, mc_qht_elem->mc_grp_ctx.dma_mem_mc.size, mc_qht_elem->mc_grp_ctx.dma_mem_mc.va, mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa); mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL; irdma_free_rsrc(rf, rf->allocated_mcgs, mc_qht_elem->mc_grp_ctx.mg_id); kfree(mc_qht_elem); } return ret; } /** * irdma_detach_mcast - detach a qp from a multicast group * @ibqp: ptr to qp * @ibgid: pointer to global ID * @lid: local ID * * returns error status */ static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) { struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_device *iwdev = iwqp->iwdev; struct irdma_pci_f *rf = iwdev->rf; u32 ip_addr[4] = {}; struct mc_table_list *mc_qht_elem; struct 
irdma_mcast_grp_ctx_entry_info mcg_info = {}; int ret; unsigned long flags; union irdma_sockaddr sgid_addr; rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) irdma_copy_ip_ntohl(ip_addr, sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); else ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); spin_lock_irqsave(&rf->qh_list_lock, flags); mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr); if (!mc_qht_elem) { spin_unlock_irqrestore(&rf->qh_list_lock, flags); ibdev_dbg(&iwdev->ibdev, "VERBS: address not found MCG\n"); return 0; } mcg_info.qp_id = iwqp->ibqp.qp_num; irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { mcast_list_del(mc_qht_elem); spin_unlock_irqrestore(&rf->qh_list_lock, flags); ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, IRDMA_OP_MC_DESTROY); if (ret) { ibdev_dbg(&iwdev->ibdev, "VERBS: failed MC_DESTROY MCG\n"); spin_lock_irqsave(&rf->qh_list_lock, flags); mcast_list_add(rf, mc_qht_elem); spin_unlock_irqrestore(&rf->qh_list_lock, flags); return -EAGAIN; } dma_free_coherent(rf->hw.device, mc_qht_elem->mc_grp_ctx.dma_mem_mc.size, mc_qht_elem->mc_grp_ctx.dma_mem_mc.va, mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa); mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL; irdma_free_rsrc(rf, rf->allocated_mcgs, mc_qht_elem->mc_grp_ctx.mg_id); kfree(mc_qht_elem); } else { spin_unlock_irqrestore(&rf->qh_list_lock, flags); ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, IRDMA_OP_MC_MODIFY); if (ret) { ibdev_dbg(&iwdev->ibdev, "VERBS: failed Modify MCG\n"); return ret; } } return 0; } static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep) { struct irdma_pci_f *rf = iwdev->rf; int err; err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx, &rf->next_ah); if (err) return err; err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep, irdma_gsi_ud_qp_ah_cb, &ah->sc_ah); if (err) { ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail"); goto err_ah_create; } if (!sleep) { int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD; do { irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); mdelay(1); } while (!ah->sc_ah.ah_info.ah_valid && --cnt); if (!cnt) { ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out"); err = -ETIMEDOUT; goto err_ah_create; } } return 0; err_ah_create: irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx); return err; } static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr) { struct irdma_pd *pd = to_iwpd(ibah->pd); struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); struct rdma_ah_attr *ah_attr = attr->ah_attr; const struct ib_gid_attr *sgid_attr; struct irdma_device *iwdev = to_iwdev(ibah->pd->device); struct irdma_pci_f *rf = iwdev->rf; struct irdma_sc_ah *sc_ah; struct irdma_ah_info *ah_info; union irdma_sockaddr sgid_addr, dgid_addr; int err; u8 dmac[ETH_ALEN]; ah->pd = pd; sc_ah = &ah->sc_ah; sc_ah->ah_info.vsi = &iwdev->vsi; irdma_sc_init_ah(&rf->sc_dev, sc_ah); ah->sgid_index = ah_attr->grh.sgid_index; sgid_attr = ah_attr->grh.sgid_attr; memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid)); rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid); rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid); ah->av.attrs = *ah_attr; ah->av.net_type = rdma_gid_attr_network_type(sgid_attr); ah_info = &sc_ah->ah_info; ah_info->pd_idx = pd->sc_pd.pd_id; if (ah_attr->ah_flags & IB_AH_GRH) { ah_info->flow_label = 
ah_attr->grh.flow_label; ah_info->hop_ttl = ah_attr->grh.hop_limit; ah_info->tc_tos = ah_attr->grh.traffic_class; } ether_addr_copy(dmac, ah_attr->roce.dmac); if (ah->av.net_type == RDMA_NETWORK_IPV4) { ah_info->ipv4_valid = true; ah_info->dest_ip_addr[0] = ntohl(dgid_addr.saddr_in.sin_addr.s_addr); ah_info->src_ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0], ah_info->dest_ip_addr[0]); if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) { ah_info->do_lpbk = true; irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true); } } else { irdma_copy_ip_ntohl(ah_info->dest_ip_addr, dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); irdma_copy_ip_ntohl(ah_info->src_ip_addr, sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr, ah_info->dest_ip_addr); if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) { ah_info->do_lpbk = true; irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false); } } err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag, ah_info->mac_addr); if (err) return err; ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, ah_info->ipv4_valid, dmac); if (ah_info->dst_arpindex == -1) return -EINVAL; if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode) ah_info->vlan_tag = 0; if (ah_info->vlan_tag < VLAN_N_VID) { u8 prio = rt_tos2priority(ah_info->tc_tos); prio = irdma_roce_get_vlan_prio(sgid_attr, prio); ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT; ah_info->insert_vlan_tag = true; } return 0; } /** * irdma_ah_exists - Check for existing identical AH * @iwdev: irdma device * @new_ah: AH to check for * * returns true if AH is found, false if not found. */ static bool irdma_ah_exists(struct irdma_device *iwdev, struct irdma_ah *new_ah) { struct irdma_ah *ah; u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^ new_ah->sc_ah.ah_info.dest_ip_addr[1] ^ new_ah->sc_ah.ah_info.dest_ip_addr[2] ^ new_ah->sc_ah.ah_info.dest_ip_addr[3]; hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) { /* Set ah_valid and ah_id the same so memcmp can work */ new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx; new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid; if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info, sizeof(ah->sc_ah.ah_info))) { refcount_inc(&ah->refcnt); new_ah->parent_ah = ah; return true; } } return false; } /** * irdma_destroy_ah - Destroy address handle * @ibah: pointer to address handle * @ah_flags: flags for sleepable */ static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags) { struct irdma_device *iwdev = to_iwdev(ibah->device); struct irdma_ah *ah = to_iwah(ibah); if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) { mutex_lock(&iwdev->ah_tbl_lock); if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) { mutex_unlock(&iwdev->ah_tbl_lock); return 0; } hash_del(&ah->parent_ah->list); kfree(ah->parent_ah); mutex_unlock(&iwdev->ah_tbl_lock); } irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY, false, NULL, ah); irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx); return 0; } /** * irdma_create_user_ah - create user address handle * @ibah: address handle * @attr: address handle attributes * @udata: User data * * returns 0 on success, error otherwise */ static int irdma_create_user_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr, struct ib_udata *udata) { #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd) struct irdma_ah *ah = 
container_of(ibah, struct irdma_ah, ibah); struct irdma_device *iwdev = to_iwdev(ibah->pd->device); struct irdma_create_ah_resp uresp; struct irdma_ah *parent_ah; int err; if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN) return -EINVAL; err = irdma_setup_ah(ibah, attr); if (err) return err; mutex_lock(&iwdev->ah_tbl_lock); if (!irdma_ah_exists(iwdev, ah)) { err = irdma_create_hw_ah(iwdev, ah, true); if (err) { mutex_unlock(&iwdev->ah_tbl_lock); return err; } /* Add new AH to list */ parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL); if (parent_ah) { u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^ parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^ parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^ parent_ah->sc_ah.ah_info.dest_ip_addr[3]; ah->parent_ah = parent_ah; hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key); refcount_set(&parent_ah->refcnt, 1); } } mutex_unlock(&iwdev->ah_tbl_lock); uresp.ah_id = ah->sc_ah.ah_info.ah_idx; err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)); if (err) irdma_destroy_ah(ibah, attr->flags); return err; } /** * irdma_create_ah - create address handle * @ibah: address handle * @attr: address handle attributes * @udata: NULL * * returns 0 on success, error otherwise */ static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr, struct ib_udata *udata) { struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); struct irdma_device *iwdev = to_iwdev(ibah->pd->device); int err; err = irdma_setup_ah(ibah, attr); if (err) return err; err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE); return err; } /** * irdma_query_ah - Query address handle * @ibah: pointer to address handle * @ah_attr: address handle attributes */ static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) { struct irdma_ah *ah = to_iwah(ibah); memset(ah_attr, 0, sizeof(*ah_attr)); if (ah->av.attrs.ah_flags & IB_AH_GRH) { ah_attr->ah_flags = IB_AH_GRH; ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label; ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos; ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl; ah_attr->grh.sgid_index = ah->sgid_index; memcpy(&ah_attr->grh.dgid, &ah->dgid, sizeof(ah_attr->grh.dgid)); } return 0; } static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev, u32 port_num) { return IB_LINK_LAYER_ETHERNET; } static const struct ib_device_ops irdma_roce_dev_ops = { .attach_mcast = irdma_attach_mcast, .create_ah = irdma_create_ah, .create_user_ah = irdma_create_user_ah, .destroy_ah = irdma_destroy_ah, .detach_mcast = irdma_detach_mcast, .get_link_layer = irdma_get_link_layer, .get_port_immutable = irdma_roce_port_immutable, .modify_qp = irdma_modify_qp_roce, .query_ah = irdma_query_ah, .query_pkey = irdma_query_pkey, }; static const struct ib_device_ops irdma_iw_dev_ops = { .get_port_immutable = irdma_iw_port_immutable, .iw_accept = irdma_accept, .iw_add_ref = irdma_qp_add_ref, .iw_connect = irdma_connect, .iw_create_listen = irdma_create_listen, .iw_destroy_listen = irdma_destroy_listen, .iw_get_qp = irdma_get_qp, .iw_reject = irdma_reject, .iw_rem_ref = irdma_qp_rem_ref, .modify_qp = irdma_modify_qp, .query_gid = irdma_query_gid, }; static const struct ib_device_ops irdma_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_IRDMA, .uverbs_abi_ver = IRDMA_ABI_VER, .alloc_hw_port_stats = irdma_alloc_hw_port_stats, .alloc_mr = irdma_alloc_mr, .alloc_mw = irdma_alloc_mw, .alloc_pd = irdma_alloc_pd, .alloc_ucontext = irdma_alloc_ucontext, .create_cq = 
irdma_create_cq, .create_qp = irdma_create_qp, .dealloc_driver = irdma_ib_dealloc_device, .dealloc_mw = irdma_dealloc_mw, .dealloc_pd = irdma_dealloc_pd, .dealloc_ucontext = irdma_dealloc_ucontext, .dereg_mr = irdma_dereg_mr, .destroy_cq = irdma_destroy_cq, .destroy_qp = irdma_destroy_qp, .disassociate_ucontext = irdma_disassociate_ucontext, .get_dev_fw_str = irdma_get_dev_fw_str, .get_dma_mr = irdma_get_dma_mr, .get_hw_stats = irdma_get_hw_stats, .map_mr_sg = irdma_map_mr_sg, .mmap = irdma_mmap, .mmap_free = irdma_mmap_free, .poll_cq = irdma_poll_cq, .post_recv = irdma_post_recv, .post_send = irdma_post_send, .query_device = irdma_query_device, .query_port = irdma_query_port, .query_qp = irdma_query_qp, .reg_user_mr = irdma_reg_user_mr, .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf, .req_notify_cq = irdma_req_notify_cq, .resize_cq = irdma_resize_cq, INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw), INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp), }; /** * irdma_init_roce_device - initialization of roce rdma device * @iwdev: irdma device */ static void irdma_init_roce_device(struct irdma_device *iwdev) { iwdev->ibdev.node_type = RDMA_NODE_IB_CA; addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, iwdev->netdev->dev_addr); ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops); } /** * irdma_init_iw_device - initialization of iwarp rdma device * @iwdev: irdma device */ static void irdma_init_iw_device(struct irdma_device *iwdev) { struct net_device *netdev = iwdev->netdev; iwdev->ibdev.node_type = RDMA_NODE_RNIC; addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr); memcpy(iwdev->ibdev.iw_ifname, netdev->name, sizeof(iwdev->ibdev.iw_ifname)); ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops); } /** * irdma_init_rdma_device - initialization of rdma device * @iwdev: irdma device */ static void irdma_init_rdma_device(struct irdma_device *iwdev) { struct pci_dev *pcidev = iwdev->rf->pcidev; if (iwdev->roce_mode) irdma_init_roce_device(iwdev); else irdma_init_iw_device(iwdev); iwdev->ibdev.phys_port_cnt = 1; iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count; iwdev->ibdev.dev.parent = &pcidev->dev; ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops); } /** * irdma_port_ibevent - indicate port event * @iwdev: irdma device */ void irdma_port_ibevent(struct irdma_device *iwdev) { struct ib_event event; event.device = &iwdev->ibdev; event.element.port_num = 1; event.event = iwdev->iw_status ? 
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; ib_dispatch_event(&event); } /** * irdma_ib_unregister_device - unregister rdma device from IB * core * @iwdev: irdma device */ void irdma_ib_unregister_device(struct irdma_device *iwdev) { iwdev->iw_status = 0; irdma_port_ibevent(iwdev); ib_unregister_device(&iwdev->ibdev); } /** * irdma_ib_register_device - register irdma device to IB core * @iwdev: irdma device */ int irdma_ib_register_device(struct irdma_device *iwdev) { int ret; irdma_init_rdma_device(iwdev); ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1); if (ret) goto error; dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX); ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device); if (ret) goto error; iwdev->iw_status = 1; irdma_port_ibevent(iwdev); return 0; error: if (ret) ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n"); return ret; } /** * irdma_ib_dealloc_device * @ibdev: ib device * * callback from ibdev dealloc_driver to deallocate resources * under irdma device */ void irdma_ib_dealloc_device(struct ib_device *ibdev) { struct irdma_device *iwdev = to_iwdev(ibdev); irdma_rt_deinit_hw(iwdev); irdma_ctrl_deinit_hw(iwdev->rf); kfree(iwdev->rf); }
linux-master
drivers/infiniband/hw/irdma/verbs.c
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "hmc.h" #include "defs.h" #include "type.h" #include "protos.h" #include "pble.h" static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc); /** * irdma_destroy_pble_prm - destroy prm during module unload * @pble_rsrc: pble resources */ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) { struct irdma_chunk *chunk; struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo; while (!list_empty(&pinfo->clist)) { chunk = (struct irdma_chunk *) pinfo->clist.next; list_del(&chunk->list); if (chunk->type == PBLE_SD_PAGED) irdma_pble_free_paged_mem(chunk); bitmap_free(chunk->bitmapbuf); kfree(chunk->chunkmem.va); } } /** * irdma_hmc_init_pble - Initialize pble resources during module load * @dev: irdma_sc_dev struct * @pble_rsrc: pble resources */ int irdma_hmc_init_pble(struct irdma_sc_dev *dev, struct irdma_hmc_pble_rsrc *pble_rsrc) { struct irdma_hmc_info *hmc_info; u32 fpm_idx = 0; int status = 0; hmc_info = dev->hmc_info; pble_rsrc->dev = dev; pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base; /* Start pble' on 4k boundary */ if (pble_rsrc->fpm_base_addr & 0xfff) fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3; pble_rsrc->unallocated_pble = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx; pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3); pble_rsrc->pinfo.pble_shift = PBLE_SHIFT; mutex_init(&pble_rsrc->pble_mutex_lock); spin_lock_init(&pble_rsrc->pinfo.prm_lock); INIT_LIST_HEAD(&pble_rsrc->pinfo.clist); if (add_pble_prm(pble_rsrc)) { irdma_destroy_pble_prm(pble_rsrc); status = -ENOMEM; } return status; } /** * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address * @pble_rsrc: structure containing fpm address * @idx: where to return indexes */ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, struct sd_pd_idx *idx) { idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE; idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE); idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD); } /** * add_sd_direct - add sd direct for pble * @pble_rsrc: pble resource ptr * @info: page info for sd */ static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_add_page_info *info) { struct irdma_sc_dev *dev = pble_rsrc->dev; int ret_code = 0; struct sd_pd_idx *idx = &info->idx; struct irdma_chunk *chunk = info->chunk; struct irdma_hmc_info *hmc_info = info->hmc_info; struct irdma_hmc_sd_entry *sd_entry = info->sd_entry; u32 offset = 0; if (!sd_entry->valid) { ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx, IRDMA_SD_TYPE_DIRECT, IRDMA_HMC_DIRECT_BP_SIZE); if (ret_code) return ret_code; chunk->type = PBLE_SD_CONTIGOUS; } offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT; chunk->size = info->pages << HMC_PAGED_BP_SHIFT; chunk->vaddr = sd_entry->u.bp.addr.va + offset; chunk->fpm_addr = pble_rsrc->next_fpm_addr; ibdev_dbg(to_ibdev(dev), "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n", chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr); return 0; } /** * fpm_to_idx - given fpm address, get pble index * @pble_rsrc: pble resource management * @addr: fpm address for index */ static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr) { u64 idx; idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3; return (u32)idx; } /** * add_bp_pages - add backing pages for sd * @pble_rsrc: pble resource 
 * @info: page info for sd
 */
static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
			struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	u8 *addr;
	struct irdma_dma_mem mem;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_chunk *chunk = info->chunk;
	int status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	if (irdma_pble_get_paged_mem(chunk, info->pages))
		return -ENOMEM;

	status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
					  IRDMA_SD_TYPE_PAGED,
					  IRDMA_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;

	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
		mem.size = 4096;
		mem.va = addr;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = irdma_add_pd_table_entry(dev, hmc_info,
							  pd_idx++, &mem);
			if (status)
				goto error;

			addr += 4096;
		}
	}

	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	irdma_pble_free_paged_mem(chunk);

	return status;
}

/**
 * irdma_get_type - add a sd entry type for sd
 * @dev: irdma_sc_dev struct
 * @idx: index of sd
 * @pages: pages in the sd
 */
static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
					       struct sd_pd_idx *idx,
					       u32 pages)
{
	enum irdma_sd_entry_type sd_entry_type;

	sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
			IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
	return sd_entry_type;
}

/**
 * add_pble_prm - add a sd entry for pble resource
 * @pble_rsrc: pble resource management
 */
static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_hmc_info *hmc_info;
	struct irdma_chunk *chunk;
	struct irdma_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	int ret_code = 0;
	enum irdma_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	struct irdma_virt_mem chunkmem;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return -ENOMEM;

	if (pble_rsrc->next_fpm_addr & 0xfff)
		return -EINVAL;

	chunkmem.size = sizeof(*chunk);
	chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
	if (!chunkmem.va)
		return -ENOMEM;

	chunk = chunkmem.va;
	chunk->chunkmem = chunkmem;
	hmc_info = dev->hmc_info;
	chunk->dev = dev;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
	pages = (idx->rel_pd_idx) ?
		(IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
		IRDMA_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid)
		sd_entry_type = irdma_get_type(dev, idx, pages);
	else
		sd_entry_type = sd_entry->entry_type;

	ibdev_dbg(to_ibdev(dev),
		  "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
		  pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
	ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
	if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(pble_rsrc, &info);

	if (ret_code)
		sd_entry_type = IRDMA_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
	if (ret_code)
		goto error;

	pble_rsrc->next_fpm_addr += chunk->size;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
		  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
			     sd_entry->u.pd_table.pd_page_addr.pa :
			     sd_entry->u.bp.addr.pa;

	if (!sd_entry->valid) {
		ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
					    idx->sd_idx, sd_entry->entry_type,
					    true);
		if (ret_code)
			goto error;
	}

	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_entry->valid = true;
	return 0;

error:
	bitmap_free(chunk->bitmapbuf);
	kfree(chunk->chunkmem.va);

	return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
		      struct irdma_pble_alloc *palloc)
{
	u32 i;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf = lvl2->leaf;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			irdma_prm_return_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo);
		else
			break;
	}

	if (root->addr)
		irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

	kfree(lvl2->leafmem.va);
	lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf;
	int ret_code;
	u64 fpm_addr;

	/* number of full 512 (4K) leafs */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	lvl2->leafmem.size = (sizeof(*leaf) * total);
	lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
	if (!lvl2->leafmem.va)
		return -ENOMEM;

	lvl2->leaf = lvl2->leafmem.va;
	leaf = lvl2->leaf;
	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
				       total << 3, &root->addr, &fpm_addr);
	if (ret_code) {
		kfree(lvl2->leafmem.va);
		lvl2->leaf = NULL;
		return -ENOMEM;
	}

	root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	root->cnt = total;
	addr = root->addr;
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ?
			 lflast : PBLE_PER_PAGE;
		ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo, pblcnt << 3,
					       &leaf->addr, &fpm_addr);
		if (ret_code)
			goto error;

		leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);
		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}

	palloc->level = PBLE_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);

	return -ENOMEM;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	int ret_code;
	u64 fpm_addr;
	struct irdma_pble_info *lvl1 = &palloc->level1;

	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
				       palloc->total_cnt << 3, &lvl1->addr,
				       &fpm_addr);
	if (ret_code)
		return -ENOMEM;

	palloc->level = PBLE_LEVEL_1;
	lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;

	return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @lvl: Bitmask for requested pble level
 */
static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			      struct irdma_pble_alloc *palloc, u8 lvl)
{
	int status = 0;

	status = get_lvl1_pble(pble_rsrc, palloc);
	if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	status = get_lvl2_pble(pble_rsrc, palloc);

	return status;
}

/**
 * irdma_get_pble - allocate pbles from the prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: #of pbles requested
 * @lvl: requested pble level mask
 */
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		   struct irdma_pble_alloc *palloc, u32 pble_cnt,
		   u8 lvl)
{
	int status = 0;
	int max_sds = 0;
	int i;

	palloc->total_cnt = pble_cnt;
	palloc->level = PBLE_LEVEL_0;
	mutex_lock(&pble_rsrc->pble_mutex_lock);

	/* check first to see if we can get pble's without acquiring
	 * additional sd's
	 */
	status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
	if (!status)
		goto exit;

	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_prm(pble_rsrc);
		if (status)
			break;

		status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
		/* if level1_only, only go through it once */
		if (!status || lvl)
			break;
	}

exit:
	if (!status) {
		pble_rsrc->allocdpbles += pble_cnt;
		pble_rsrc->stats_alloc_ok++;
	} else {
		pble_rsrc->stats_alloc_fail++;
	}
	mutex_unlock(&pble_rsrc->pble_mutex_lock);

	return status;
}

/**
 * irdma_free_pble - put pbles back into prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		     struct irdma_pble_alloc *palloc)
{
	pble_rsrc->freedpbles += palloc->total_cnt;

	if (palloc->level == PBLE_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		irdma_prm_return_pbles(&pble_rsrc->pinfo,
				       &palloc->level1.chunkinfo);
	pble_rsrc->stats_alloc_freed++;
}
linux-master
drivers/infiniband/hw/irdma/pble.c
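The pble.c row above implements the irdma driver's pool allocator for page-buffer-list entries (PBLEs): irdma_get_pble() leases a run of PBLEs, growing the pool with add_pble_prm() when needed and falling back from a flat level-1 range to a root-plus-leaves level-2 layout for large requests, and irdma_free_pble() returns the lease. A minimal caller sketch follows, assuming the irdma driver headers; only irdma_get_pble(), irdma_free_pble() and the PBLE_LEVEL_* constants come from the file above, while the helper name example_lease_pbles and its parameters are hypothetical.

/*
 * Hypothetical caller sketch (not from the kernel tree): shows how a
 * memory-registration path might lease PBLEs from the pool above and
 * return them on teardown.
 */
#include "pble.h"

static int example_lease_pbles(struct irdma_hmc_pble_rsrc *pble_rsrc,
			       struct irdma_pble_alloc *palloc,
			       u32 page_cnt)
{
	int err;

	/* Passing PBLE_LEVEL_0 lets the pool pick the layout: requests
	 * larger than one 4K page of PBLEs may come back as a level-2
	 * (root + leaves) allocation, while PBLE_LEVEL_1 would force a
	 * single contiguous level-1 range.
	 */
	err = irdma_get_pble(pble_rsrc, palloc, page_cnt, PBLE_LEVEL_0);
	if (err)
		return err;

	/* ... the caller would now write the DMA addresses of its pages
	 * into palloc->level1.addr or the level-2 leaves ...
	 */

	/* On teardown, hand the lease back to the pool. */
	irdma_free_pble(pble_rsrc, palloc);

	return 0;
}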
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" #include "trace.h" static void irdma_cm_post_event(struct irdma_cm_event *event); static void irdma_disconnect_worker(struct work_struct *work); /** * irdma_free_sqbuf - put back puda buffer if refcount is 0 * @vsi: The VSI structure of the device * @bufp: puda buffer to free */ void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp) { struct irdma_puda_buf *buf = bufp; struct irdma_puda_rsrc *ilq = vsi->ilq; if (refcount_dec_and_test(&buf->refcount)) irdma_puda_ret_bufpool(ilq, buf); } /** * irdma_record_ird_ord - Record IRD/ORD passed in * @cm_node: connection's node * @conn_ird: connection IRD * @conn_ord: connection ORD */ static void irdma_record_ird_ord(struct irdma_cm_node *cm_node, u32 conn_ird, u32 conn_ord) { if (conn_ird > cm_node->dev->hw_attrs.max_hw_ird) conn_ird = cm_node->dev->hw_attrs.max_hw_ird; if (conn_ord > cm_node->dev->hw_attrs.max_hw_ord) conn_ord = cm_node->dev->hw_attrs.max_hw_ord; else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO) conn_ord = 1; cm_node->ird_size = conn_ird; cm_node->ord_size = conn_ord; } /** * irdma_copy_ip_ntohl - copy IP address from network to host * @dst: IP address in host order * @src: IP address in network order (big endian) */ void irdma_copy_ip_ntohl(u32 *dst, __be32 *src) { *dst++ = ntohl(*src++); *dst++ = ntohl(*src++); *dst++ = ntohl(*src++); *dst = ntohl(*src); } /** * irdma_copy_ip_htonl - copy IP address from host to network order * @dst: IP address in network order (big endian) * @src: IP address in host order */ void irdma_copy_ip_htonl(__be32 *dst, u32 *src) { *dst++ = htonl(*src++); *dst++ = htonl(*src++); *dst++ = htonl(*src++); *dst = htonl(*src); } /** * irdma_get_addr_info * @cm_node: contains ip/tcp info * @cm_info: to get a copy of the cm_node ip/tcp info */ static void irdma_get_addr_info(struct irdma_cm_node *cm_node, struct irdma_cm_info *cm_info) { memset(cm_info, 0, sizeof(*cm_info)); cm_info->ipv4 = cm_node->ipv4; cm_info->vlan_id = cm_node->vlan_id; memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr)); memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr)); cm_info->loc_port = cm_node->loc_port; cm_info->rem_port = cm_node->rem_port; } /** * irdma_fill_sockaddr4 - fill in addr info for IPv4 connection * @cm_node: connection's node * @event: upper layer's cm event */ static inline void irdma_fill_sockaddr4(struct irdma_cm_node *cm_node, struct iw_cm_event *event) { struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr; struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr; laddr->sin_family = AF_INET; raddr->sin_family = AF_INET; laddr->sin_port = htons(cm_node->loc_port); raddr->sin_port = htons(cm_node->rem_port); laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]); raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]); } /** * irdma_fill_sockaddr6 - fill in addr info for IPv6 connection * @cm_node: connection's node * @event: upper layer's cm event */ static inline void irdma_fill_sockaddr6(struct irdma_cm_node *cm_node, struct iw_cm_event *event) { struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr; struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr; laddr6->sin6_family = AF_INET6; raddr6->sin6_family = AF_INET6; laddr6->sin6_port = htons(cm_node->loc_port); raddr6->sin6_port = htons(cm_node->rem_port); irdma_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32, 
cm_node->loc_addr); irdma_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32, cm_node->rem_addr); } /** * irdma_get_cmevent_info - for cm event upcall * @cm_node: connection's node * @cm_id: upper layers cm struct for the event * @event: upper layer's cm event */ static inline void irdma_get_cmevent_info(struct irdma_cm_node *cm_node, struct iw_cm_id *cm_id, struct iw_cm_event *event) { memcpy(&event->local_addr, &cm_id->m_local_addr, sizeof(event->local_addr)); memcpy(&event->remote_addr, &cm_id->m_remote_addr, sizeof(event->remote_addr)); if (cm_node) { event->private_data = cm_node->pdata_buf; event->private_data_len = (u8)cm_node->pdata.size; event->ird = cm_node->ird_size; event->ord = cm_node->ord_size; } } /** * irdma_send_cm_event - upcall cm's event handler * @cm_node: connection's node * @cm_id: upper layer's cm info struct * @type: Event type to indicate * @status: status for the event type */ static int irdma_send_cm_event(struct irdma_cm_node *cm_node, struct iw_cm_id *cm_id, enum iw_cm_event_type type, int status) { struct iw_cm_event event = {}; event.event = type; event.status = status; trace_irdma_send_cm_event(cm_node, cm_id, type, status, __builtin_return_address(0)); ibdev_dbg(&cm_node->iwdev->ibdev, "CM: cm_node %p cm_id=%p state=%d accel=%d event_type=%d status=%d\n", cm_node, cm_id, cm_node->accelerated, cm_node->state, type, status); switch (type) { case IW_CM_EVENT_CONNECT_REQUEST: if (cm_node->ipv4) irdma_fill_sockaddr4(cm_node, &event); else irdma_fill_sockaddr6(cm_node, &event); event.provider_data = cm_node; event.private_data = cm_node->pdata_buf; event.private_data_len = (u8)cm_node->pdata.size; event.ird = cm_node->ird_size; break; case IW_CM_EVENT_CONNECT_REPLY: irdma_get_cmevent_info(cm_node, cm_id, &event); break; case IW_CM_EVENT_ESTABLISHED: event.ird = cm_node->ird_size; event.ord = cm_node->ord_size; break; case IW_CM_EVENT_DISCONNECT: case IW_CM_EVENT_CLOSE: /* Wait if we are in RTS but havent issued the iwcm event upcall */ if (!cm_node->accelerated) wait_for_completion(&cm_node->establish_comp); break; default: return -EINVAL; } return cm_id->event_handler(cm_id, &event); } /** * irdma_timer_list_prep - add connection nodes to a list to perform timer tasks * @cm_core: cm's core * @timer_list: a timer list to which cm_node will be selected */ static void irdma_timer_list_prep(struct irdma_cm_core *cm_core, struct list_head *timer_list) { struct irdma_cm_node *cm_node; int bkt; hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) { if ((cm_node->close_entry || cm_node->send_entry) && refcount_inc_not_zero(&cm_node->refcnt)) list_add(&cm_node->timer_entry, timer_list); } } /** * irdma_create_event - create cm event * @cm_node: connection's node * @type: Event type to generate */ static struct irdma_cm_event *irdma_create_event(struct irdma_cm_node *cm_node, enum irdma_cm_event_type type) { struct irdma_cm_event *event; if (!cm_node->cm_id) return NULL; event = kzalloc(sizeof(*event), GFP_ATOMIC); if (!event) return NULL; event->type = type; event->cm_node = cm_node; memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr)); memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr)); event->cm_info.rem_port = cm_node->rem_port; event->cm_info.loc_port = cm_node->loc_port; event->cm_info.cm_id = cm_node->cm_id; ibdev_dbg(&cm_node->iwdev->ibdev, "CM: node=%p event=%p type=%u dst=%pI4 src=%pI4\n", cm_node, event, type, event->cm_info.loc_addr, event->cm_info.rem_addr); 
trace_irdma_create_event(cm_node, type, __builtin_return_address(0)); irdma_cm_post_event(event); return event; } /** * irdma_free_retrans_entry - free send entry * @cm_node: connection's node */ static void irdma_free_retrans_entry(struct irdma_cm_node *cm_node) { struct irdma_device *iwdev = cm_node->iwdev; struct irdma_timer_entry *send_entry; send_entry = cm_node->send_entry; if (!send_entry) return; cm_node->send_entry = NULL; irdma_free_sqbuf(&iwdev->vsi, send_entry->sqbuf); kfree(send_entry); refcount_dec(&cm_node->refcnt); } /** * irdma_cleanup_retrans_entry - free send entry with lock * @cm_node: connection's node */ static void irdma_cleanup_retrans_entry(struct irdma_cm_node *cm_node) { unsigned long flags; spin_lock_irqsave(&cm_node->retrans_list_lock, flags); irdma_free_retrans_entry(cm_node); spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); } /** * irdma_form_ah_cm_frame - get a free packet and build frame with address handle * @cm_node: connection's node ionfo to use in frame * @options: pointer to options info * @hdr: pointer mpa header * @pdata: pointer to private data * @flags: indicates FIN or ACK */ static struct irdma_puda_buf *irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node, struct irdma_kmem_info *options, struct irdma_kmem_info *hdr, struct irdma_mpa_priv_info *pdata, u8 flags) { struct irdma_puda_buf *sqbuf; struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi; u8 *buf; struct tcphdr *tcph; u16 pktsize; u32 opts_len = 0; u32 pd_len = 0; u32 hdr_len = 0; if (!cm_node->ah || !cm_node->ah->ah_info.ah_valid) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: AH invalid\n"); return NULL; } sqbuf = irdma_puda_get_bufpool(vsi->ilq); if (!sqbuf) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: SQ buf NULL\n"); return NULL; } sqbuf->ah_id = cm_node->ah->ah_info.ah_idx; buf = sqbuf->mem.va; if (options) opts_len = (u32)options->size; if (hdr) hdr_len = hdr->size; if (pdata) pd_len = pdata->size; pktsize = sizeof(*tcph) + opts_len + hdr_len + pd_len; memset(buf, 0, sizeof(*tcph)); sqbuf->totallen = pktsize; sqbuf->tcphlen = sizeof(*tcph) + opts_len; sqbuf->scratch = cm_node; tcph = (struct tcphdr *)buf; buf += sizeof(*tcph); tcph->source = htons(cm_node->loc_port); tcph->dest = htons(cm_node->rem_port); tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num); if (flags & SET_ACK) { cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt; tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num); tcph->ack = 1; } else { tcph->ack_seq = 0; } if (flags & SET_SYN) { cm_node->tcp_cntxt.loc_seq_num++; tcph->syn = 1; } else { cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len; } if (flags & SET_FIN) { cm_node->tcp_cntxt.loc_seq_num++; tcph->fin = 1; } if (flags & SET_RST) tcph->rst = 1; tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2); sqbuf->tcphlen = tcph->doff << 2; tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd); tcph->urg_ptr = 0; if (opts_len) { memcpy(buf, options->addr, opts_len); buf += opts_len; } if (hdr_len) { memcpy(buf, hdr->addr, hdr_len); buf += hdr_len; } if (pdata && pdata->addr) memcpy(buf, pdata->addr, pdata->size); refcount_set(&sqbuf->refcount, 1); print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET, 16, 8, sqbuf->mem.va, sqbuf->totallen, false); return sqbuf; } /** * irdma_form_uda_cm_frame - get a free packet and build frame full tcpip packet * @cm_node: connection's node ionfo to use in frame * @options: pointer to options info * @hdr: pointer mpa header * @pdata: pointer to private data * @flags: indicates FIN or ACK */ static struct 
irdma_puda_buf *irdma_form_uda_cm_frame(struct irdma_cm_node *cm_node, struct irdma_kmem_info *options, struct irdma_kmem_info *hdr, struct irdma_mpa_priv_info *pdata, u8 flags) { struct irdma_puda_buf *sqbuf; struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi; u8 *buf; struct tcphdr *tcph; struct iphdr *iph; struct ipv6hdr *ip6h; struct ethhdr *ethh; u16 pktsize; u16 eth_hlen = ETH_HLEN; u32 opts_len = 0; u32 pd_len = 0; u32 hdr_len = 0; u16 vtag; sqbuf = irdma_puda_get_bufpool(vsi->ilq); if (!sqbuf) return NULL; buf = sqbuf->mem.va; if (options) opts_len = (u32)options->size; if (hdr) hdr_len = hdr->size; if (pdata) pd_len = pdata->size; if (cm_node->vlan_id < VLAN_N_VID) eth_hlen += 4; if (cm_node->ipv4) pktsize = sizeof(*iph) + sizeof(*tcph); else pktsize = sizeof(*ip6h) + sizeof(*tcph); pktsize += opts_len + hdr_len + pd_len; memset(buf, 0, eth_hlen + pktsize); sqbuf->totallen = pktsize + eth_hlen; sqbuf->maclen = eth_hlen; sqbuf->tcphlen = sizeof(*tcph) + opts_len; sqbuf->scratch = cm_node; ethh = (struct ethhdr *)buf; buf += eth_hlen; if (cm_node->do_lpb) sqbuf->do_lpb = true; if (cm_node->ipv4) { sqbuf->ipv4 = true; iph = (struct iphdr *)buf; buf += sizeof(*iph); tcph = (struct tcphdr *)buf; buf += sizeof(*tcph); ether_addr_copy(ethh->h_dest, cm_node->rem_mac); ether_addr_copy(ethh->h_source, cm_node->loc_mac); if (cm_node->vlan_id < VLAN_N_VID) { ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q); vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id; ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag); ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP); } else { ethh->h_proto = htons(ETH_P_IP); } iph->version = IPVERSION; iph->ihl = 5; /* 5 * 4Byte words, IP headr len */ iph->tos = cm_node->tos; iph->tot_len = htons(pktsize); iph->id = htons(++cm_node->tcp_cntxt.loc_id); iph->frag_off = htons(0x4000); iph->ttl = 0x40; iph->protocol = IPPROTO_TCP; iph->saddr = htonl(cm_node->loc_addr[0]); iph->daddr = htonl(cm_node->rem_addr[0]); } else { sqbuf->ipv4 = false; ip6h = (struct ipv6hdr *)buf; buf += sizeof(*ip6h); tcph = (struct tcphdr *)buf; buf += sizeof(*tcph); ether_addr_copy(ethh->h_dest, cm_node->rem_mac); ether_addr_copy(ethh->h_source, cm_node->loc_mac); if (cm_node->vlan_id < VLAN_N_VID) { ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q); vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id; ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag); ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6); } else { ethh->h_proto = htons(ETH_P_IPV6); } ip6h->version = 6; ip6h->priority = cm_node->tos >> 4; ip6h->flow_lbl[0] = cm_node->tos << 4; ip6h->flow_lbl[1] = 0; ip6h->flow_lbl[2] = 0; ip6h->payload_len = htons(pktsize - sizeof(*ip6h)); ip6h->nexthdr = 6; ip6h->hop_limit = 128; irdma_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32, cm_node->loc_addr); irdma_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32, cm_node->rem_addr); } tcph->source = htons(cm_node->loc_port); tcph->dest = htons(cm_node->rem_port); tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num); if (flags & SET_ACK) { cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt; tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num); tcph->ack = 1; } else { tcph->ack_seq = 0; } if (flags & SET_SYN) { cm_node->tcp_cntxt.loc_seq_num++; tcph->syn = 1; } else { cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len; } if (flags & SET_FIN) { cm_node->tcp_cntxt.loc_seq_num++; tcph->fin = 1; } if (flags & SET_RST) tcph->rst = 1; tcph->doff = 
(u16)((sizeof(*tcph) + opts_len + 3) >> 2); sqbuf->tcphlen = tcph->doff << 2; tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd); tcph->urg_ptr = 0; if (opts_len) { memcpy(buf, options->addr, opts_len); buf += opts_len; } if (hdr_len) { memcpy(buf, hdr->addr, hdr_len); buf += hdr_len; } if (pdata && pdata->addr) memcpy(buf, pdata->addr, pdata->size); refcount_set(&sqbuf->refcount, 1); print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET, 16, 8, sqbuf->mem.va, sqbuf->totallen, false); return sqbuf; } /** * irdma_send_reset - Send RST packet * @cm_node: connection's node */ int irdma_send_reset(struct irdma_cm_node *cm_node) { struct irdma_puda_buf *sqbuf; int flags = SET_RST | SET_ACK; trace_irdma_send_reset(cm_node, 0, __builtin_return_address(0)); sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL, flags); if (!sqbuf) return -ENOMEM; ibdev_dbg(&cm_node->iwdev->ibdev, "CM: caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n", __builtin_return_address(0), cm_node, cm_node->cm_id, cm_node->accelerated, cm_node->state, cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr); return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 0, 1); } /** * irdma_active_open_err - send event for active side cm error * @cm_node: connection's node * @reset: Flag to send reset or not */ static void irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset) { trace_irdma_active_open_err(cm_node, reset, __builtin_return_address(0)); irdma_cleanup_retrans_entry(cm_node); cm_node->cm_core->stats_connect_errs++; if (reset) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: cm_node=%p state=%d\n", cm_node, cm_node->state); refcount_inc(&cm_node->refcnt); irdma_send_reset(cm_node); } cm_node->state = IRDMA_CM_STATE_CLOSED; irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED); } /** * irdma_passive_open_err - handle passive side cm error * @cm_node: connection's node * @reset: send reset or just free cm_node */ static void irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset) { irdma_cleanup_retrans_entry(cm_node); cm_node->cm_core->stats_passive_errs++; cm_node->state = IRDMA_CM_STATE_CLOSED; ibdev_dbg(&cm_node->iwdev->ibdev, "CM: cm_node=%p state =%d\n", cm_node, cm_node->state); trace_irdma_passive_open_err(cm_node, reset, __builtin_return_address(0)); if (reset) irdma_send_reset(cm_node); else irdma_rem_ref_cm_node(cm_node); } /** * irdma_event_connect_error - to create connect error event * @event: cm information for connect event */ static void irdma_event_connect_error(struct irdma_cm_event *event) { struct irdma_qp *iwqp; struct iw_cm_id *cm_id; cm_id = event->cm_node->cm_id; if (!cm_id) return; iwqp = cm_id->provider_data; if (!iwqp || !iwqp->iwdev) return; iwqp->cm_id = NULL; cm_id->provider_data = NULL; irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET); irdma_rem_ref_cm_node(event->cm_node); } /** * irdma_process_options - process options from TCP header * @cm_node: connection's node * @optionsloc: point to start of options * @optionsize: size of all options * @syn_pkt: flag if syn packet */ static int irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc, u32 optionsize, u32 syn_pkt) { u32 tmp; u32 offset = 0; union all_known_options *all_options; char got_mss_option = 0; while (offset < optionsize) { all_options = (union all_known_options *)(optionsloc + offset); switch (all_options->base.optionnum) { case OPTION_NUM_EOL: offset 
= optionsize; break; case OPTION_NUM_NONE: offset += 1; continue; case OPTION_NUM_MSS: ibdev_dbg(&cm_node->iwdev->ibdev, "CM: MSS Length: %d Offset: %d Size: %d\n", all_options->mss.len, offset, optionsize); got_mss_option = 1; if (all_options->mss.len != 4) return -EINVAL; tmp = ntohs(all_options->mss.mss); if ((cm_node->ipv4 && (tmp + IRDMA_MTU_TO_MSS_IPV4) < IRDMA_MIN_MTU_IPV4) || (!cm_node->ipv4 && (tmp + IRDMA_MTU_TO_MSS_IPV6) < IRDMA_MIN_MTU_IPV6)) return -EINVAL; if (tmp < cm_node->tcp_cntxt.mss) cm_node->tcp_cntxt.mss = tmp; break; case OPTION_NUM_WINDOW_SCALE: cm_node->tcp_cntxt.snd_wscale = all_options->windowscale.shiftcount; break; default: ibdev_dbg(&cm_node->iwdev->ibdev, "CM: Unsupported TCP Option: %x\n", all_options->base.optionnum); break; } offset += all_options->base.len; } if (!got_mss_option && syn_pkt) cm_node->tcp_cntxt.mss = IRDMA_CM_DEFAULT_MSS; return 0; } /** * irdma_handle_tcp_options - setup TCP context info after parsing TCP options * @cm_node: connection's node * @tcph: pointer tcp header * @optionsize: size of options rcvd * @passive: active or passive flag */ static int irdma_handle_tcp_options(struct irdma_cm_node *cm_node, struct tcphdr *tcph, int optionsize, int passive) { u8 *optionsloc = (u8 *)&tcph[1]; int ret; if (optionsize) { ret = irdma_process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn); if (ret) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: Node %p, Sending Reset\n", cm_node); if (passive) irdma_passive_open_err(cm_node, true); else irdma_active_open_err(cm_node, true); return ret; } } cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) << cm_node->tcp_cntxt.snd_wscale; if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd; return 0; } /** * irdma_build_mpa_v1 - build a MPA V1 frame * @cm_node: connection's node * @start_addr: address where to build frame * @mpa_key: to do read0 or write0 */ static void irdma_build_mpa_v1(struct irdma_cm_node *cm_node, void *start_addr, u8 mpa_key) { struct ietf_mpa_v1 *mpa_frame = start_addr; switch (mpa_key) { case MPA_KEY_REQUEST: memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE); break; case MPA_KEY_REPLY: memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); break; default: break; } mpa_frame->flags = IETF_MPA_FLAGS_CRC; mpa_frame->rev = cm_node->mpa_frame_rev; mpa_frame->priv_data_len = htons(cm_node->pdata.size); } /** * irdma_build_mpa_v2 - build a MPA V2 frame * @cm_node: connection's node * @start_addr: buffer start address * @mpa_key: to do read0 or write0 */ static void irdma_build_mpa_v2(struct irdma_cm_node *cm_node, void *start_addr, u8 mpa_key) { struct ietf_mpa_v2 *mpa_frame = start_addr; struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg; u16 ctrl_ird, ctrl_ord; /* initialize the upper 5 bytes of the frame */ irdma_build_mpa_v1(cm_node, start_addr, mpa_key); mpa_frame->flags |= IETF_MPA_V2_FLAG; if (cm_node->iwdev->iw_ooo) { mpa_frame->flags |= IETF_MPA_FLAGS_MARKERS; cm_node->rcv_mark_en = true; } mpa_frame->priv_data_len = cpu_to_be16(be16_to_cpu(mpa_frame->priv_data_len) + IETF_RTR_MSG_SIZE); /* initialize RTR msg */ if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) { ctrl_ird = IETF_NO_IRD_ORD; ctrl_ord = IETF_NO_IRD_ORD; } else { ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ? IETF_NO_IRD_ORD : cm_node->ird_size; ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ? 
IETF_NO_IRD_ORD : cm_node->ord_size; } ctrl_ird |= IETF_PEER_TO_PEER; switch (mpa_key) { case MPA_KEY_REQUEST: ctrl_ord |= IETF_RDMA0_WRITE; ctrl_ord |= IETF_RDMA0_READ; break; case MPA_KEY_REPLY: switch (cm_node->send_rdma0_op) { case SEND_RDMA_WRITE_ZERO: ctrl_ord |= IETF_RDMA0_WRITE; break; case SEND_RDMA_READ_ZERO: ctrl_ord |= IETF_RDMA0_READ; break; } break; default: break; } rtr_msg->ctrl_ird = htons(ctrl_ird); rtr_msg->ctrl_ord = htons(ctrl_ord); } /** * irdma_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2 * @cm_node: connection's node * @mpa: mpa: data buffer * @mpa_key: to do read0 or write0 */ static int irdma_cm_build_mpa_frame(struct irdma_cm_node *cm_node, struct irdma_kmem_info *mpa, u8 mpa_key) { int hdr_len = 0; switch (cm_node->mpa_frame_rev) { case IETF_MPA_V1: hdr_len = sizeof(struct ietf_mpa_v1); irdma_build_mpa_v1(cm_node, mpa->addr, mpa_key); break; case IETF_MPA_V2: hdr_len = sizeof(struct ietf_mpa_v2); irdma_build_mpa_v2(cm_node, mpa->addr, mpa_key); break; default: break; } return hdr_len; } /** * irdma_send_mpa_request - active node send mpa request to passive node * @cm_node: connection's node */ static int irdma_send_mpa_request(struct irdma_cm_node *cm_node) { struct irdma_puda_buf *sqbuf; cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame; cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node, &cm_node->mpa_hdr, MPA_KEY_REQUEST); if (!cm_node->mpa_hdr.size) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: mpa size = %d\n", cm_node->mpa_hdr.size); return -EINVAL; } sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, &cm_node->mpa_hdr, &cm_node->pdata, SET_ACK); if (!sqbuf) return -ENOMEM; return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1, 0); } /** * irdma_send_mpa_reject - * @cm_node: connection's node * @pdata: reject data for connection * @plen: length of reject data */ static int irdma_send_mpa_reject(struct irdma_cm_node *cm_node, const void *pdata, u8 plen) { struct irdma_puda_buf *sqbuf; struct irdma_mpa_priv_info priv_info; cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame; cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node, &cm_node->mpa_hdr, MPA_KEY_REPLY); cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT; priv_info.addr = pdata; priv_info.size = plen; sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, &cm_node->mpa_hdr, &priv_info, SET_ACK | SET_FIN); if (!sqbuf) return -ENOMEM; cm_node->state = IRDMA_CM_STATE_FIN_WAIT1; return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1, 0); } /** * irdma_negotiate_mpa_v2_ird_ord - negotiate MPAv2 IRD/ORD * @cm_node: connection's node * @buf: Data pointer */ static int irdma_negotiate_mpa_v2_ird_ord(struct irdma_cm_node *cm_node, u8 *buf) { struct ietf_mpa_v2 *mpa_v2_frame; struct ietf_rtr_msg *rtr_msg; u16 ird_size; u16 ord_size; u16 ctrl_ord; u16 ctrl_ird; mpa_v2_frame = (struct ietf_mpa_v2 *)buf; rtr_msg = &mpa_v2_frame->rtr_msg; /* parse rtr message */ ctrl_ord = ntohs(rtr_msg->ctrl_ord); ctrl_ird = ntohs(rtr_msg->ctrl_ird); ird_size = ctrl_ird & IETF_NO_IRD_ORD; ord_size = ctrl_ord & IETF_NO_IRD_ORD; if (!(ctrl_ird & IETF_PEER_TO_PEER)) return -EOPNOTSUPP; if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) { cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD; goto negotiate_done; } if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) { /* responder */ if (!ord_size && (ctrl_ord & IETF_RDMA0_READ)) cm_node->ird_size = 1; if (cm_node->ord_size > ird_size) cm_node->ord_size = ird_size; } else { /* initiator */ if (!ird_size && (ctrl_ord & 
IETF_RDMA0_READ)) /* Remote peer doesn't support RDMA0_READ */ return -EOPNOTSUPP; if (cm_node->ord_size > ird_size) cm_node->ord_size = ird_size; if (cm_node->ird_size < ord_size) /* no resources available */ return -EINVAL; } negotiate_done: if (ctrl_ord & IETF_RDMA0_READ) cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; else if (ctrl_ord & IETF_RDMA0_WRITE) cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO; else /* Not supported RDMA0 operation */ return -EOPNOTSUPP; ibdev_dbg(&cm_node->iwdev->ibdev, "CM: MPAV2 Negotiated ORD: %d, IRD: %d\n", cm_node->ord_size, cm_node->ird_size); trace_irdma_negotiate_mpa_v2(cm_node); return 0; } /** * irdma_parse_mpa - process an IETF MPA frame * @cm_node: connection's node * @buf: Data pointer * @type: to return accept or reject * @len: Len of mpa buffer */ static int irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type, u32 len) { struct ietf_mpa_v1 *mpa_frame; int mpa_hdr_len, priv_data_len, ret; *type = IRDMA_MPA_REQUEST_ACCEPT; if (len < sizeof(struct ietf_mpa_v1)) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: ietf buffer small (%x)\n", len); return -EINVAL; } mpa_frame = (struct ietf_mpa_v1 *)buf; mpa_hdr_len = sizeof(struct ietf_mpa_v1); priv_data_len = ntohs(mpa_frame->priv_data_len); if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: private_data too big %d\n", priv_data_len); return -EOVERFLOW; } if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: unsupported mpa rev = %d\n", mpa_frame->rev); return -EINVAL; } if (mpa_frame->rev > cm_node->mpa_frame_rev) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: rev %d\n", mpa_frame->rev); return -EINVAL; } cm_node->mpa_frame_rev = mpa_frame->rev; if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) { if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: Unexpected MPA Key received\n"); return -EINVAL; } } else { if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: Unexpected MPA Key received\n"); return -EINVAL; } } if (priv_data_len + mpa_hdr_len > len) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: ietf buffer len(%x + %x != %x)\n", priv_data_len, mpa_hdr_len, len); return -EOVERFLOW; } if (len > IRDMA_MAX_CM_BUF) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: ietf buffer large len = %d\n", len); return -EOVERFLOW; } switch (mpa_frame->rev) { case IETF_MPA_V2: mpa_hdr_len += IETF_RTR_MSG_SIZE; ret = irdma_negotiate_mpa_v2_ird_ord(cm_node, buf); if (ret) return ret; break; case IETF_MPA_V1: default: break; } memcpy(cm_node->pdata_buf, buf + mpa_hdr_len, priv_data_len); cm_node->pdata.size = priv_data_len; if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT) *type = IRDMA_MPA_REQUEST_REJECT; if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS) cm_node->snd_mark_en = true; return 0; } /** * irdma_schedule_cm_timer * @cm_node: connection's node * @sqbuf: buffer to send * @type: if it is send or close * @send_retrans: if rexmits to be done * @close_when_complete: is cm_node to be removed * * note - cm_node needs to be protected before calling this. Encase in: * irdma_rem_ref_cm_node(cm_core, cm_node); * irdma_schedule_cm_timer(...) 
* refcount_inc(&cm_node->refcnt); */ int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node, struct irdma_puda_buf *sqbuf, enum irdma_timer_type type, int send_retrans, int close_when_complete) { struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi; struct irdma_cm_core *cm_core = cm_node->cm_core; struct irdma_timer_entry *new_send; u32 was_timer_set; unsigned long flags; new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); if (!new_send) { if (type != IRDMA_TIMER_TYPE_CLOSE) irdma_free_sqbuf(vsi, sqbuf); return -ENOMEM; } new_send->retrycount = IRDMA_DEFAULT_RETRYS; new_send->retranscount = IRDMA_DEFAULT_RETRANS; new_send->sqbuf = sqbuf; new_send->timetosend = jiffies; new_send->type = type; new_send->send_retrans = send_retrans; new_send->close_when_complete = close_when_complete; if (type == IRDMA_TIMER_TYPE_CLOSE) { new_send->timetosend += (HZ / 10); if (cm_node->close_entry) { kfree(new_send); ibdev_dbg(&cm_node->iwdev->ibdev, "CM: already close entry\n"); return -EINVAL; } cm_node->close_entry = new_send; } else { /* type == IRDMA_TIMER_TYPE_SEND */ spin_lock_irqsave(&cm_node->retrans_list_lock, flags); cm_node->send_entry = new_send; refcount_inc(&cm_node->refcnt); spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); new_send->timetosend = jiffies + IRDMA_RETRY_TIMEOUT; refcount_inc(&sqbuf->refcount); irdma_puda_send_buf(vsi->ilq, sqbuf); if (!send_retrans) { irdma_cleanup_retrans_entry(cm_node); if (close_when_complete) irdma_rem_ref_cm_node(cm_node); return 0; } } spin_lock_irqsave(&cm_core->ht_lock, flags); was_timer_set = timer_pending(&cm_core->tcp_timer); if (!was_timer_set) { cm_core->tcp_timer.expires = new_send->timetosend; add_timer(&cm_core->tcp_timer); } spin_unlock_irqrestore(&cm_core->ht_lock, flags); return 0; } /** * irdma_retrans_expired - Could not rexmit the packet * @cm_node: connection's node */ static void irdma_retrans_expired(struct irdma_cm_node *cm_node) { enum irdma_cm_node_state state = cm_node->state; cm_node->state = IRDMA_CM_STATE_CLOSED; switch (state) { case IRDMA_CM_STATE_SYN_RCVD: case IRDMA_CM_STATE_CLOSING: irdma_rem_ref_cm_node(cm_node); break; case IRDMA_CM_STATE_FIN_WAIT1: case IRDMA_CM_STATE_LAST_ACK: irdma_send_reset(cm_node); break; default: refcount_inc(&cm_node->refcnt); irdma_send_reset(cm_node); irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED); break; } } /** * irdma_handle_close_entry - for handling retry/timeouts * @cm_node: connection's node * @rem_node: flag for remove cm_node */ static void irdma_handle_close_entry(struct irdma_cm_node *cm_node, u32 rem_node) { struct irdma_timer_entry *close_entry = cm_node->close_entry; struct irdma_qp *iwqp; unsigned long flags; if (!close_entry) return; iwqp = (struct irdma_qp *)close_entry->sqbuf; if (iwqp) { spin_lock_irqsave(&iwqp->lock, flags); if (iwqp->cm_id) { iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED; iwqp->hw_iwarp_state = IRDMA_QP_STATE_ERROR; iwqp->last_aeq = IRDMA_AE_RESET_SENT; iwqp->ibqp_state = IB_QPS_ERR; spin_unlock_irqrestore(&iwqp->lock, flags); irdma_cm_disconn(iwqp); } else { spin_unlock_irqrestore(&iwqp->lock, flags); } } else if (rem_node) { /* TIME_WAIT state */ irdma_rem_ref_cm_node(cm_node); } kfree(close_entry); cm_node->close_entry = NULL; } /** * irdma_cm_timer_tick - system's timer expired callback * @t: Pointer to timer_list */ static void irdma_cm_timer_tick(struct timer_list *t) { unsigned long nexttimeout = jiffies + IRDMA_LONG_TIME; struct irdma_cm_node *cm_node; struct irdma_timer_entry *send_entry, *close_entry; struct list_head *list_core_temp; 
struct list_head *list_node; struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer); struct irdma_sc_vsi *vsi; u32 settimer = 0; unsigned long timetosend; unsigned long flags; struct list_head timer_list; INIT_LIST_HEAD(&timer_list); rcu_read_lock(); irdma_timer_list_prep(cm_core, &timer_list); rcu_read_unlock(); list_for_each_safe (list_node, list_core_temp, &timer_list) { cm_node = container_of(list_node, struct irdma_cm_node, timer_entry); close_entry = cm_node->close_entry; if (close_entry) { if (time_after(close_entry->timetosend, jiffies)) { if (nexttimeout > close_entry->timetosend || !settimer) { nexttimeout = close_entry->timetosend; settimer = 1; } } else { irdma_handle_close_entry(cm_node, 1); } } spin_lock_irqsave(&cm_node->retrans_list_lock, flags); send_entry = cm_node->send_entry; if (!send_entry) goto done; if (time_after(send_entry->timetosend, jiffies)) { if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) { if (nexttimeout > send_entry->timetosend || !settimer) { nexttimeout = send_entry->timetosend; settimer = 1; } } else { irdma_free_retrans_entry(cm_node); } goto done; } if (cm_node->state == IRDMA_CM_STATE_OFFLOADED || cm_node->state == IRDMA_CM_STATE_CLOSED) { irdma_free_retrans_entry(cm_node); goto done; } if (!send_entry->retranscount || !send_entry->retrycount) { irdma_free_retrans_entry(cm_node); spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); irdma_retrans_expired(cm_node); cm_node->state = IRDMA_CM_STATE_CLOSED; spin_lock_irqsave(&cm_node->retrans_list_lock, flags); goto done; } spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); vsi = &cm_node->iwdev->vsi; if (!cm_node->ack_rcvd) { refcount_inc(&send_entry->sqbuf->refcount); irdma_puda_send_buf(vsi->ilq, send_entry->sqbuf); cm_node->cm_core->stats_pkt_retrans++; } spin_lock_irqsave(&cm_node->retrans_list_lock, flags); if (send_entry->send_retrans) { send_entry->retranscount--; timetosend = (IRDMA_RETRY_TIMEOUT << (IRDMA_DEFAULT_RETRANS - send_entry->retranscount)); send_entry->timetosend = jiffies + min(timetosend, IRDMA_MAX_TIMEOUT); if (nexttimeout > send_entry->timetosend || !settimer) { nexttimeout = send_entry->timetosend; settimer = 1; } } else { int close_when_complete; close_when_complete = send_entry->close_when_complete; irdma_free_retrans_entry(cm_node); if (close_when_complete) irdma_rem_ref_cm_node(cm_node); } done: spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); irdma_rem_ref_cm_node(cm_node); } if (settimer) { spin_lock_irqsave(&cm_core->ht_lock, flags); if (!timer_pending(&cm_core->tcp_timer)) { cm_core->tcp_timer.expires = nexttimeout; add_timer(&cm_core->tcp_timer); } spin_unlock_irqrestore(&cm_core->ht_lock, flags); } } /** * irdma_send_syn - send SYN packet * @cm_node: connection's node * @sendack: flag to set ACK bit or not */ int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack) { struct irdma_puda_buf *sqbuf; int flags = SET_SYN; char optionsbuf[sizeof(struct option_mss) + sizeof(struct option_windowscale) + sizeof(struct option_base) + TCP_OPTIONS_PADDING]; struct irdma_kmem_info opts; int optionssize = 0; /* Sending MSS option */ union all_known_options *options; opts.addr = optionsbuf; if (!cm_node) return -EINVAL; options = (union all_known_options *)&optionsbuf[optionssize]; options->mss.optionnum = OPTION_NUM_MSS; options->mss.len = sizeof(struct option_mss); options->mss.mss = htons(cm_node->tcp_cntxt.mss); optionssize += sizeof(struct option_mss); options = (union all_known_options *)&optionsbuf[optionssize]; 
options->windowscale.optionnum = OPTION_NUM_WINDOW_SCALE; options->windowscale.len = sizeof(struct option_windowscale); options->windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale; optionssize += sizeof(struct option_windowscale); options = (union all_known_options *)&optionsbuf[optionssize]; options->eol = OPTION_NUM_EOL; optionssize += 1; if (sendack) flags |= SET_ACK; opts.size = optionssize; sqbuf = cm_node->cm_core->form_cm_frame(cm_node, &opts, NULL, NULL, flags); if (!sqbuf) return -ENOMEM; return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1, 0); } /** * irdma_send_ack - Send ACK packet * @cm_node: connection's node */ void irdma_send_ack(struct irdma_cm_node *cm_node) { struct irdma_puda_buf *sqbuf; struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi; sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK); if (sqbuf) irdma_puda_send_buf(vsi->ilq, sqbuf); } /** * irdma_send_fin - Send FIN pkt * @cm_node: connection's node */ static int irdma_send_fin(struct irdma_cm_node *cm_node) { struct irdma_puda_buf *sqbuf; sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN); if (!sqbuf) return -ENOMEM; return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1, 0); } /** * irdma_find_listener - find a cm node listening on this addr-port pair * @cm_core: cm's core * @dst_addr: listener ip addr * @ipv4: flag indicating IPv4 when true * @dst_port: listener tcp port num * @vlan_id: virtual LAN ID * @listener_state: state to match with listen node's */ static struct irdma_cm_listener * irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4, u16 dst_port, u16 vlan_id, enum irdma_cm_listener_state listener_state) { struct irdma_cm_listener *listen_node; static const u32 ip_zero[4] = { 0, 0, 0, 0 }; u32 listen_addr[4]; u16 listen_port; unsigned long flags; /* walk list and find cm_node associated with this session ID */ spin_lock_irqsave(&cm_core->listen_list_lock, flags); list_for_each_entry (listen_node, &cm_core->listen_list, list) { memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr)); listen_port = listen_node->loc_port; if (listen_node->ipv4 != ipv4 || listen_port != dst_port || !(listener_state & listen_node->listener_state)) continue; /* compare node pair, return node handle if a match */ if (!memcmp(listen_addr, ip_zero, sizeof(listen_addr)) || (!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) && vlan_id == listen_node->vlan_id)) { refcount_inc(&listen_node->refcnt); spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); trace_irdma_find_listener(listen_node); return listen_node; } } spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); return NULL; } /** * irdma_del_multiple_qhash - Remove qhash and child listens * @iwdev: iWarp device * @cm_info: CM info for parent listen node * @cm_parent_listen_node: The parent listen node */ static int irdma_del_multiple_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, struct irdma_cm_listener *cm_parent_listen_node) { struct irdma_cm_listener *child_listen_node; struct list_head *pos, *tpos; unsigned long flags; int ret = -EINVAL; spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); list_for_each_safe (pos, tpos, &cm_parent_listen_node->child_listen_list) { child_listen_node = list_entry(pos, struct irdma_cm_listener, child_listen_list); if (child_listen_node->ipv4) ibdev_dbg(&iwdev->ibdev, "CM: removing child listen for IP=%pI4, port=%d, vlan=%d\n", child_listen_node->loc_addr, 
child_listen_node->loc_port, child_listen_node->vlan_id); else ibdev_dbg(&iwdev->ibdev, "CM: removing child listen for IP=%pI6, port=%d, vlan=%d\n", child_listen_node->loc_addr, child_listen_node->loc_port, child_listen_node->vlan_id); trace_irdma_del_multiple_qhash(child_listen_node); list_del(pos); memcpy(cm_info->loc_addr, child_listen_node->loc_addr, sizeof(cm_info->loc_addr)); cm_info->vlan_id = child_listen_node->vlan_id; if (child_listen_node->qhash_set) { ret = irdma_manage_qhash(iwdev, cm_info, IRDMA_QHASH_TYPE_TCP_SYN, IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false); child_listen_node->qhash_set = false; } else { ret = 0; } ibdev_dbg(&iwdev->ibdev, "CM: Child listen node freed = %p\n", child_listen_node); kfree(child_listen_node); cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++; } spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags); return ret; } static u8 irdma_iw_get_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4) { struct net_device *ndev = NULL; rcu_read_lock(); if (ipv4) { ndev = ip_dev_find(&init_net, htonl(loc_addr[0])); } else if (IS_ENABLED(CONFIG_IPV6)) { struct net_device *ip_dev; struct in6_addr laddr6; irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, loc_addr); for_each_netdev_rcu (&init_net, ip_dev) { if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) { ndev = ip_dev; break; } } } if (!ndev) goto done; if (is_vlan_dev(ndev)) prio = (vlan_dev_get_egress_qos_mask(ndev, prio) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; if (ipv4) dev_put(ndev); done: rcu_read_unlock(); return prio; } /** * irdma_get_vlan_mac_ipv6 - Gets the vlan and mac * @addr: local IPv6 address * @vlan_id: vlan id for the given IPv6 address * @mac: mac address for the given IPv6 address * * Returns the vlan id and mac for an IPv6 address. */ void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac) { struct net_device *ip_dev = NULL; struct in6_addr laddr6; if (!IS_ENABLED(CONFIG_IPV6)) return; irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr); if (vlan_id) *vlan_id = 0xFFFF; /* Match rdma_vlan_dev_vlan_id() */ if (mac) eth_zero_addr(mac); rcu_read_lock(); for_each_netdev_rcu (&init_net, ip_dev) { if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) { if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(ip_dev); if (ip_dev->dev_addr && mac) ether_addr_copy(mac, ip_dev->dev_addr); break; } } rcu_read_unlock(); } /** * irdma_get_vlan_ipv4 - Returns the vlan_id for IPv4 address * @addr: local IPv4 address */ u16 irdma_get_vlan_ipv4(u32 *addr) { struct net_device *netdev; u16 vlan_id = 0xFFFF; netdev = ip_dev_find(&init_net, htonl(addr[0])); if (netdev) { vlan_id = rdma_vlan_dev_vlan_id(netdev); dev_put(netdev); } return vlan_id; } /** * irdma_add_mqh_6 - Adds multiple qhashes for IPv6 * @iwdev: iWarp device * @cm_info: CM info for parent listen node * @cm_parent_listen_node: The parent listen node * * Adds a qhash and a child listen node for every IPv6 address * on the adapter and adds the associated qhash filter */ static int irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, struct irdma_cm_listener *cm_parent_listen_node) { struct net_device *ip_dev; struct inet6_dev *idev; struct inet6_ifaddr *ifp, *tmp; struct irdma_cm_listener *child_listen_node; unsigned long flags; int ret = 0; rtnl_lock(); for_each_netdev(&init_net, ip_dev) { if (!(ip_dev->flags & IFF_UP)) continue; if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) || (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) && ip_dev != iwdev->netdev) continue; idev = __in6_dev_get(ip_dev); if (!idev) { 
ibdev_dbg(&iwdev->ibdev, "CM: idev == NULL\n"); break; } list_for_each_entry_safe (ifp, tmp, &idev->addr_list, if_list) { ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr, rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr); child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL); ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n", child_listen_node); if (!child_listen_node) { ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n"); ret = -ENOMEM; goto exit; } cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev); cm_parent_listen_node->vlan_id = cm_info->vlan_id; memcpy(child_listen_node, cm_parent_listen_node, sizeof(*child_listen_node)); irdma_copy_ip_ntohl(child_listen_node->loc_addr, ifp->addr.in6_u.u6_addr32); memcpy(cm_info->loc_addr, child_listen_node->loc_addr, sizeof(cm_info->loc_addr)); if (!iwdev->vsi.dscp_mode) cm_info->user_pri = irdma_iw_get_vlan_prio(child_listen_node->loc_addr, cm_info->user_pri, false); ret = irdma_manage_qhash(iwdev, cm_info, IRDMA_QHASH_TYPE_TCP_SYN, IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true); if (ret) { kfree(child_listen_node); continue; } trace_irdma_add_mqh_6(iwdev, child_listen_node, ip_dev->dev_addr); child_listen_node->qhash_set = true; spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); list_add(&child_listen_node->child_listen_list, &cm_parent_listen_node->child_listen_list); spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags); cm_parent_listen_node->cm_core->stats_listen_nodes_created++; } } exit: rtnl_unlock(); return ret; } /** * irdma_add_mqh_4 - Adds multiple qhashes for IPv4 * @iwdev: iWarp device * @cm_info: CM info for parent listen node * @cm_parent_listen_node: The parent listen node * * Adds a qhash and a child listen node for every IPv4 address * on the adapter and adds the associated qhash filter */ static int irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, struct irdma_cm_listener *cm_parent_listen_node) { struct net_device *ip_dev; struct in_device *idev; struct irdma_cm_listener *child_listen_node; unsigned long flags; const struct in_ifaddr *ifa; int ret = 0; rtnl_lock(); for_each_netdev(&init_net, ip_dev) { if (!(ip_dev->flags & IFF_UP)) continue; if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) || (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) && ip_dev != iwdev->netdev) continue; idev = in_dev_get(ip_dev); if (!idev) continue; in_dev_for_each_ifa_rtnl(ifa, idev) { ibdev_dbg(&iwdev->ibdev, "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address, rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr); child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL); cm_parent_listen_node->cm_core->stats_listen_nodes_created++; ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n", child_listen_node); if (!child_listen_node) { ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n"); in_dev_put(idev); ret = -ENOMEM; goto exit; } cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev); cm_parent_listen_node->vlan_id = cm_info->vlan_id; memcpy(child_listen_node, cm_parent_listen_node, sizeof(*child_listen_node)); child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address); memcpy(cm_info->loc_addr, child_listen_node->loc_addr, sizeof(cm_info->loc_addr)); if (!iwdev->vsi.dscp_mode) cm_info->user_pri = irdma_iw_get_vlan_prio(child_listen_node->loc_addr, cm_info->user_pri, true); ret = irdma_manage_qhash(iwdev, cm_info, IRDMA_QHASH_TYPE_TCP_SYN, IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true); if (ret) { 
kfree(child_listen_node); cm_parent_listen_node->cm_core ->stats_listen_nodes_created--; continue; } trace_irdma_add_mqh_4(iwdev, child_listen_node, ip_dev->dev_addr); child_listen_node->qhash_set = true; spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); list_add(&child_listen_node->child_listen_list, &cm_parent_listen_node->child_listen_list); spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags); } in_dev_put(idev); } exit: rtnl_unlock(); return ret; } /** * irdma_add_mqh - Adds multiple qhashes * @iwdev: iWarp device * @cm_info: CM info for parent listen node * @cm_listen_node: The parent listen node */ static int irdma_add_mqh(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, struct irdma_cm_listener *cm_listen_node) { if (cm_info->ipv4) return irdma_add_mqh_4(iwdev, cm_info, cm_listen_node); else return irdma_add_mqh_6(iwdev, cm_info, cm_listen_node); } /** * irdma_reset_list_prep - add connection nodes slated for reset to list * @cm_core: cm's core * @listener: pointer to listener node * @reset_list: a list to which cm_node will be selected */ static void irdma_reset_list_prep(struct irdma_cm_core *cm_core, struct irdma_cm_listener *listener, struct list_head *reset_list) { struct irdma_cm_node *cm_node; int bkt; hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) { if (cm_node->listener == listener && !cm_node->accelerated && refcount_inc_not_zero(&cm_node->refcnt)) list_add(&cm_node->reset_entry, reset_list); } } /** * irdma_dec_refcnt_listen - delete listener and associated cm nodes * @cm_core: cm's core * @listener: pointer to listener node * @free_hanging_nodes: to free associated cm_nodes * @apbvt_del: flag to delete the apbvt */ static int irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core, struct irdma_cm_listener *listener, int free_hanging_nodes, bool apbvt_del) { int err; struct list_head *list_pos; struct list_head *list_temp; struct irdma_cm_node *cm_node; struct list_head reset_list; struct irdma_cm_info nfo; enum irdma_cm_node_state old_state; unsigned long flags; trace_irdma_dec_refcnt_listen(listener, __builtin_return_address(0)); /* free non-accelerated child nodes for this listener */ INIT_LIST_HEAD(&reset_list); if (free_hanging_nodes) { rcu_read_lock(); irdma_reset_list_prep(cm_core, listener, &reset_list); rcu_read_unlock(); } list_for_each_safe (list_pos, list_temp, &reset_list) { cm_node = container_of(list_pos, struct irdma_cm_node, reset_entry); if (cm_node->state >= IRDMA_CM_STATE_FIN_WAIT1) { irdma_rem_ref_cm_node(cm_node); continue; } irdma_cleanup_retrans_entry(cm_node); err = irdma_send_reset(cm_node); if (err) { cm_node->state = IRDMA_CM_STATE_CLOSED; ibdev_dbg(&cm_node->iwdev->ibdev, "CM: send reset failed\n"); } else { old_state = cm_node->state; cm_node->state = IRDMA_CM_STATE_LISTENER_DESTROYED; if (old_state != IRDMA_CM_STATE_MPAREQ_RCVD) irdma_rem_ref_cm_node(cm_node); } } if (refcount_dec_and_test(&listener->refcnt)) { spin_lock_irqsave(&cm_core->listen_list_lock, flags); list_del(&listener->list); spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); if (apbvt_del) irdma_del_apbvt(listener->iwdev, listener->apbvt_entry); memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr)); nfo.loc_port = listener->loc_port; nfo.ipv4 = listener->ipv4; nfo.vlan_id = listener->vlan_id; nfo.user_pri = listener->user_pri; nfo.qh_qpid = listener->iwdev->vsi.ilq->qp_id; if (!list_empty(&listener->child_listen_list)) { irdma_del_multiple_qhash(listener->iwdev, &nfo, listener); } else { if (listener->qhash_set) 
irdma_manage_qhash(listener->iwdev, &nfo, IRDMA_QHASH_TYPE_TCP_SYN, IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false); } cm_core->stats_listen_destroyed++; cm_core->stats_listen_nodes_destroyed++; ibdev_dbg(&listener->iwdev->ibdev, "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n", listener->loc_port, listener->loc_addr, listener, listener->cm_id, listener->qhash_set, listener->vlan_id, apbvt_del); kfree(listener); listener = NULL; return 0; } return -EINVAL; } /** * irdma_cm_del_listen - delete a listener * @cm_core: cm's core * @listener: passive connection's listener * @apbvt_del: flag to delete apbvt */ static int irdma_cm_del_listen(struct irdma_cm_core *cm_core, struct irdma_cm_listener *listener, bool apbvt_del) { listener->listener_state = IRDMA_CM_LISTENER_PASSIVE_STATE; listener->cm_id = NULL; return irdma_dec_refcnt_listen(cm_core, listener, 1, apbvt_del); } /** * irdma_addr_resolve_neigh - resolve neighbor address * @iwdev: iwarp device structure * @src_ip: local ip address * @dst_ip: remote ip address * @arpindex: if there is an arp entry */ static int irdma_addr_resolve_neigh(struct irdma_device *iwdev, u32 src_ip, u32 dst_ip, int arpindex) { struct rtable *rt; struct neighbour *neigh; int rc = arpindex; __be32 dst_ipaddr = htonl(dst_ip); __be32 src_ipaddr = htonl(src_ip); rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0); if (IS_ERR(rt)) { ibdev_dbg(&iwdev->ibdev, "CM: ip_route_output fail\n"); return -EINVAL; } neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr); if (!neigh) goto exit; if (neigh->nud_state & NUD_VALID) rc = irdma_add_arp(iwdev->rf, &dst_ip, true, neigh->ha); else neigh_event_send(neigh, NULL); if (neigh) neigh_release(neigh); exit: ip_rt_put(rt); return rc; } /** * irdma_get_dst_ipv6 - get destination cache entry via ipv6 lookup * @src_addr: local ipv6 sock address * @dst_addr: destination ipv6 sock address */ static struct dst_entry *irdma_get_dst_ipv6(struct sockaddr_in6 *src_addr, struct sockaddr_in6 *dst_addr) { struct dst_entry *dst = NULL; if ((IS_ENABLED(CONFIG_IPV6))) { struct flowi6 fl6 = {}; fl6.daddr = dst_addr->sin6_addr; fl6.saddr = src_addr->sin6_addr; if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) fl6.flowi6_oif = dst_addr->sin6_scope_id; dst = ip6_route_output(&init_net, NULL, &fl6); } return dst; } /** * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address * @iwdev: iwarp device structure * @src: local ip address * @dest: remote ip address * @arpindex: if there is an arp entry */ static int irdma_addr_resolve_neigh_ipv6(struct irdma_device *iwdev, u32 *src, u32 *dest, int arpindex) { struct neighbour *neigh; int rc = arpindex; struct dst_entry *dst; struct sockaddr_in6 dst_addr = {}; struct sockaddr_in6 src_addr = {}; dst_addr.sin6_family = AF_INET6; irdma_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest); src_addr.sin6_family = AF_INET6; irdma_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src); dst = irdma_get_dst_ipv6(&src_addr, &dst_addr); if (!dst || dst->error) { if (dst) { dst_release(dst); ibdev_dbg(&iwdev->ibdev, "CM: ip6_route_output returned dst->error = %d\n", dst->error); } return -EINVAL; } neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32); if (!neigh) goto exit; ibdev_dbg(&iwdev->ibdev, "CM: dst_neigh_lookup MAC=%pM\n", neigh->ha); trace_irdma_addr_resolve(iwdev, neigh->ha); if (neigh->nud_state & NUD_VALID) rc = irdma_add_arp(iwdev->rf, dest, false, neigh->ha); else neigh_event_send(neigh, NULL); if (neigh) neigh_release(neigh); 
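	/* Resolution done (or kicked off via neigh_event_send): drop the route
	 * reference and return either the refreshed ARP index or the caller's
	 * original arpindex.
	 */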
exit: dst_release(dst); return rc; } /** * irdma_find_node - find a cm node that matches the reference cm node * @cm_core: cm's core * @rem_port: remote tcp port num * @rem_addr: remote ip addr * @loc_port: local tcp port num * @loc_addr: local ip addr * @vlan_id: local VLAN ID */ struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core, u16 rem_port, u32 *rem_addr, u16 loc_port, u32 *loc_addr, u16 vlan_id) { struct irdma_cm_node *cm_node; u32 key = (rem_port << 16) | loc_port; rcu_read_lock(); hash_for_each_possible_rcu(cm_core->cm_hash_tbl, cm_node, list, key) { if (cm_node->vlan_id == vlan_id && cm_node->loc_port == loc_port && cm_node->rem_port == rem_port && !memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) && !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr))) { if (!refcount_inc_not_zero(&cm_node->refcnt)) goto exit; rcu_read_unlock(); trace_irdma_find_node(cm_node, 0, NULL); return cm_node; } } exit: rcu_read_unlock(); /* no owner node */ return NULL; } /** * irdma_add_hte_node - add a cm node to the hash table * @cm_core: cm's core * @cm_node: connection's node */ static void irdma_add_hte_node(struct irdma_cm_core *cm_core, struct irdma_cm_node *cm_node) { unsigned long flags; u32 key = (cm_node->rem_port << 16) | cm_node->loc_port; spin_lock_irqsave(&cm_core->ht_lock, flags); hash_add_rcu(cm_core->cm_hash_tbl, &cm_node->list, key); spin_unlock_irqrestore(&cm_core->ht_lock, flags); } /** * irdma_ipv4_is_lpb - check if loopback * @loc_addr: local addr to compare * @rem_addr: remote address */ bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr) { return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr); } /** * irdma_ipv6_is_lpb - check if loopback * @loc_addr: local addr to compare * @rem_addr: remote address */ bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr) { struct in6_addr raddr6; irdma_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr); return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6); } /** * irdma_cm_create_ah - create a cm address handle * @cm_node: The connection manager node to create AH for * @wait: Provides option to wait for ah creation or not */ static int irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait) { struct irdma_ah_info ah_info = {}; struct irdma_device *iwdev = cm_node->iwdev; ether_addr_copy(ah_info.mac_addr, iwdev->netdev->dev_addr); ah_info.hop_ttl = 0x40; ah_info.tc_tos = cm_node->tos; ah_info.vsi = &iwdev->vsi; if (cm_node->ipv4) { ah_info.ipv4_valid = true; ah_info.dest_ip_addr[0] = cm_node->rem_addr[0]; ah_info.src_ip_addr[0] = cm_node->loc_addr[0]; ah_info.do_lpbk = irdma_ipv4_is_lpb(ah_info.src_ip_addr[0], ah_info.dest_ip_addr[0]); } else { memcpy(ah_info.dest_ip_addr, cm_node->rem_addr, sizeof(ah_info.dest_ip_addr)); memcpy(ah_info.src_ip_addr, cm_node->loc_addr, sizeof(ah_info.src_ip_addr)); ah_info.do_lpbk = irdma_ipv6_is_lpb(ah_info.src_ip_addr, ah_info.dest_ip_addr); } ah_info.vlan_tag = cm_node->vlan_id; if (cm_node->vlan_id < VLAN_N_VID) { ah_info.insert_vlan_tag = 1; ah_info.vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT; } ah_info.dst_arpindex = irdma_arp_table(iwdev->rf, ah_info.dest_ip_addr, ah_info.ipv4_valid, NULL, IRDMA_ARP_RESOLVE); if (irdma_puda_create_ah(&iwdev->rf->sc_dev, &ah_info, wait, IRDMA_PUDA_RSRC_TYPE_ILQ, cm_node, &cm_node->ah)) return -ENOMEM; trace_irdma_create_ah(cm_node); return 0; } /** * irdma_cm_free_ah - free a cm address handle * @cm_node: The connection manager node to create AH for */ static void irdma_cm_free_ah(struct 
irdma_cm_node *cm_node) { struct irdma_device *iwdev = cm_node->iwdev; trace_irdma_cm_free_ah(cm_node); irdma_puda_free_ah(&iwdev->rf->sc_dev, cm_node->ah); cm_node->ah = NULL; } /** * irdma_make_cm_node - create a new instance of a cm node * @cm_core: cm's core * @iwdev: iwarp device structure * @cm_info: quad info for connection * @listener: passive connection's listener */ static struct irdma_cm_node * irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev, struct irdma_cm_info *cm_info, struct irdma_cm_listener *listener) { struct irdma_cm_node *cm_node; int oldarpindex; int arpindex; struct net_device *netdev = iwdev->netdev; /* create an hte and cm_node for this instance */ cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC); if (!cm_node) return NULL; /* set our node specific transport info */ cm_node->ipv4 = cm_info->ipv4; cm_node->vlan_id = cm_info->vlan_id; if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode) cm_node->vlan_id = 0; cm_node->tos = cm_info->tos; cm_node->user_pri = cm_info->user_pri; if (listener) { if (listener->tos != cm_info->tos) ibdev_warn(&iwdev->ibdev, "application TOS[%d] and remote client TOS[%d] mismatch\n", listener->tos, cm_info->tos); if (iwdev->vsi.dscp_mode) { cm_node->user_pri = listener->user_pri; } else { cm_node->tos = max(listener->tos, cm_info->tos); cm_node->user_pri = rt_tos2priority(cm_node->tos); cm_node->user_pri = irdma_iw_get_vlan_prio(cm_info->loc_addr, cm_node->user_pri, cm_info->ipv4); } ibdev_dbg(&iwdev->ibdev, "DCB: listener: TOS:[%d] UP:[%d]\n", cm_node->tos, cm_node->user_pri); trace_irdma_listener_tos(iwdev, cm_node->tos, cm_node->user_pri); } memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr)); memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr)); cm_node->loc_port = cm_info->loc_port; cm_node->rem_port = cm_info->rem_port; cm_node->mpa_frame_rev = IRDMA_CM_DEFAULT_MPA_VER; cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; cm_node->iwdev = iwdev; cm_node->dev = &iwdev->rf->sc_dev; cm_node->ird_size = cm_node->dev->hw_attrs.max_hw_ird; cm_node->ord_size = cm_node->dev->hw_attrs.max_hw_ord; cm_node->listener = listener; cm_node->cm_id = cm_info->cm_id; ether_addr_copy(cm_node->loc_mac, netdev->dev_addr); spin_lock_init(&cm_node->retrans_list_lock); cm_node->ack_rcvd = false; init_completion(&cm_node->establish_comp); refcount_set(&cm_node->refcnt, 1); /* associate our parent CM core */ cm_node->cm_core = cm_core; cm_node->tcp_cntxt.loc_id = IRDMA_CM_DEFAULT_LOCAL_ID; cm_node->tcp_cntxt.rcv_wscale = iwdev->rcv_wscale; cm_node->tcp_cntxt.rcv_wnd = iwdev->rcv_wnd >> cm_node->tcp_cntxt.rcv_wscale; if (cm_node->ipv4) { cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]), htonl(cm_node->rem_addr[0]), htons(cm_node->loc_port), htons(cm_node->rem_port)); cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4; } else if (IS_ENABLED(CONFIG_IPV6)) { __be32 loc[4] = { htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]), htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3]) }; __be32 rem[4] = { htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]), htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3]) }; cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem, htons(cm_node->loc_port), htons(cm_node->rem_port)); cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6; } if ((cm_node->ipv4 && irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0])) || (!cm_node->ipv4 && irdma_ipv6_is_lpb(cm_node->loc_addr, 
cm_node->rem_addr))) { cm_node->do_lpb = true; arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr, cm_node->ipv4, NULL, IRDMA_ARP_RESOLVE); } else { oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr, cm_node->ipv4, NULL, IRDMA_ARP_RESOLVE); if (cm_node->ipv4) arpindex = irdma_addr_resolve_neigh(iwdev, cm_info->loc_addr[0], cm_info->rem_addr[0], oldarpindex); else if (IS_ENABLED(CONFIG_IPV6)) arpindex = irdma_addr_resolve_neigh_ipv6(iwdev, cm_info->loc_addr, cm_info->rem_addr, oldarpindex); else arpindex = -EINVAL; } if (arpindex < 0) goto err; ether_addr_copy(cm_node->rem_mac, iwdev->rf->arp_table[arpindex].mac_addr); irdma_add_hte_node(cm_core, cm_node); cm_core->stats_nodes_created++; return cm_node; err: kfree(cm_node); return NULL; } static void irdma_destroy_connection(struct irdma_cm_node *cm_node) { struct irdma_cm_core *cm_core = cm_node->cm_core; struct irdma_qp *iwqp; struct irdma_cm_info nfo; /* if the node is destroyed before connection was accelerated */ if (!cm_node->accelerated && cm_node->accept_pend) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: node destroyed before established\n"); atomic_dec(&cm_node->listener->pend_accepts_cnt); } if (cm_node->close_entry) irdma_handle_close_entry(cm_node, 0); if (cm_node->listener) { irdma_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); } else { if (cm_node->apbvt_set) { irdma_del_apbvt(cm_node->iwdev, cm_node->apbvt_entry); cm_node->apbvt_set = 0; } irdma_get_addr_info(cm_node, &nfo); if (cm_node->qhash_set) { nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id; irdma_manage_qhash(cm_node->iwdev, &nfo, IRDMA_QHASH_TYPE_TCP_ESTABLISHED, IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false); cm_node->qhash_set = 0; } } iwqp = cm_node->iwqp; if (iwqp) { cm_node->cm_id->rem_ref(cm_node->cm_id); cm_node->cm_id = NULL; iwqp->cm_id = NULL; irdma_qp_rem_ref(&iwqp->ibqp); cm_node->iwqp = NULL; } else if (cm_node->qhash_set) { irdma_get_addr_info(cm_node, &nfo); nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id; irdma_manage_qhash(cm_node->iwdev, &nfo, IRDMA_QHASH_TYPE_TCP_ESTABLISHED, IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false); cm_node->qhash_set = 0; } cm_core->cm_free_ah(cm_node); } /** * irdma_rem_ref_cm_node - destroy an instance of a cm node * @cm_node: connection's node */ void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node) { struct irdma_cm_core *cm_core = cm_node->cm_core; unsigned long flags; trace_irdma_rem_ref_cm_node(cm_node, 0, __builtin_return_address(0)); spin_lock_irqsave(&cm_core->ht_lock, flags); if (!refcount_dec_and_test(&cm_node->refcnt)) { spin_unlock_irqrestore(&cm_core->ht_lock, flags); return; } if (cm_node->iwqp) { cm_node->iwqp->cm_node = NULL; cm_node->iwqp->cm_id = NULL; } hash_del_rcu(&cm_node->list); cm_node->cm_core->stats_nodes_destroyed++; spin_unlock_irqrestore(&cm_core->ht_lock, flags); irdma_destroy_connection(cm_node); kfree_rcu(cm_node, rcu_head); } /** * irdma_handle_fin_pkt - FIN packet received * @cm_node: connection's node */ static void irdma_handle_fin_pkt(struct irdma_cm_node *cm_node) { switch (cm_node->state) { case IRDMA_CM_STATE_SYN_RCVD: case IRDMA_CM_STATE_SYN_SENT: case IRDMA_CM_STATE_ESTABLISHED: case IRDMA_CM_STATE_MPAREJ_RCVD: cm_node->tcp_cntxt.rcv_nxt++; irdma_cleanup_retrans_entry(cm_node); cm_node->state = IRDMA_CM_STATE_LAST_ACK; irdma_send_fin(cm_node); break; case IRDMA_CM_STATE_MPAREQ_SENT: irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED); cm_node->tcp_cntxt.rcv_nxt++; irdma_cleanup_retrans_entry(cm_node); cm_node->state = IRDMA_CM_STATE_CLOSED; 
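		/* Active open aborted by the peer's FIN: the ABORTED event was
		 * queued above; close the node out with a RST, holding an extra
		 * reference across the reset send.
		 */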
refcount_inc(&cm_node->refcnt); irdma_send_reset(cm_node); break; case IRDMA_CM_STATE_FIN_WAIT1: cm_node->tcp_cntxt.rcv_nxt++; irdma_cleanup_retrans_entry(cm_node); cm_node->state = IRDMA_CM_STATE_CLOSING; irdma_send_ack(cm_node); /* * Wait for ACK as this is simultaneous close. * After we receive ACK, do not send anything. * Just rm the node. */ break; case IRDMA_CM_STATE_FIN_WAIT2: cm_node->tcp_cntxt.rcv_nxt++; irdma_cleanup_retrans_entry(cm_node); cm_node->state = IRDMA_CM_STATE_TIME_WAIT; irdma_send_ack(cm_node); irdma_schedule_cm_timer(cm_node, NULL, IRDMA_TIMER_TYPE_CLOSE, 1, 0); break; case IRDMA_CM_STATE_TIME_WAIT: cm_node->tcp_cntxt.rcv_nxt++; irdma_cleanup_retrans_entry(cm_node); cm_node->state = IRDMA_CM_STATE_CLOSED; irdma_rem_ref_cm_node(cm_node); break; case IRDMA_CM_STATE_OFFLOADED: default: ibdev_dbg(&cm_node->iwdev->ibdev, "CM: bad state node state = %d\n", cm_node->state); break; } } /** * irdma_handle_rst_pkt - process received RST packet * @cm_node: connection's node * @rbuf: receive buffer */ static void irdma_handle_rst_pkt(struct irdma_cm_node *cm_node, struct irdma_puda_buf *rbuf) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: caller: %pS cm_node=%p state=%d rem_port=0x%04x loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n", __builtin_return_address(0), cm_node, cm_node->state, cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr); irdma_cleanup_retrans_entry(cm_node); switch (cm_node->state) { case IRDMA_CM_STATE_SYN_SENT: case IRDMA_CM_STATE_MPAREQ_SENT: switch (cm_node->mpa_frame_rev) { case IETF_MPA_V2: /* Drop down to MPA_V1*/ cm_node->mpa_frame_rev = IETF_MPA_V1; /* send a syn and goto syn sent state */ cm_node->state = IRDMA_CM_STATE_SYN_SENT; if (irdma_send_syn(cm_node, 0)) irdma_active_open_err(cm_node, false); break; case IETF_MPA_V1: default: irdma_active_open_err(cm_node, false); break; } break; case IRDMA_CM_STATE_MPAREQ_RCVD: atomic_inc(&cm_node->passive_state); break; case IRDMA_CM_STATE_ESTABLISHED: case IRDMA_CM_STATE_SYN_RCVD: case IRDMA_CM_STATE_LISTENING: irdma_passive_open_err(cm_node, false); break; case IRDMA_CM_STATE_OFFLOADED: irdma_active_open_err(cm_node, false); break; case IRDMA_CM_STATE_CLOSED: break; case IRDMA_CM_STATE_FIN_WAIT2: case IRDMA_CM_STATE_FIN_WAIT1: case IRDMA_CM_STATE_LAST_ACK: case IRDMA_CM_STATE_TIME_WAIT: cm_node->state = IRDMA_CM_STATE_CLOSED; irdma_rem_ref_cm_node(cm_node); break; default: break; } } /** * irdma_handle_rcv_mpa - Process a recv'd mpa buffer * @cm_node: connection's node * @rbuf: receive buffer */ static void irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node, struct irdma_puda_buf *rbuf) { int err; int datasize = rbuf->datalen; u8 *dataloc = rbuf->data; enum irdma_cm_event_type type = IRDMA_CM_EVENT_UNKNOWN; u32 res_type; err = irdma_parse_mpa(cm_node, dataloc, &res_type, datasize); if (err) { if (cm_node->state == IRDMA_CM_STATE_MPAREQ_SENT) irdma_active_open_err(cm_node, true); else irdma_passive_open_err(cm_node, true); return; } switch (cm_node->state) { case IRDMA_CM_STATE_ESTABLISHED: if (res_type == IRDMA_MPA_REQUEST_REJECT) ibdev_dbg(&cm_node->iwdev->ibdev, "CM: state for reject\n"); cm_node->state = IRDMA_CM_STATE_MPAREQ_RCVD; type = IRDMA_CM_EVENT_MPA_REQ; irdma_send_ack(cm_node); /* ACK received MPA request */ atomic_set(&cm_node->passive_state, IRDMA_PASSIVE_STATE_INDICATED); break; case IRDMA_CM_STATE_MPAREQ_SENT: irdma_cleanup_retrans_entry(cm_node); if (res_type == IRDMA_MPA_REQUEST_REJECT) { type = IRDMA_CM_EVENT_MPA_REJECT; cm_node->state = IRDMA_CM_STATE_MPAREJ_RCVD; } 
else { type = IRDMA_CM_EVENT_CONNECTED; cm_node->state = IRDMA_CM_STATE_OFFLOADED; } irdma_send_ack(cm_node); break; default: ibdev_dbg(&cm_node->iwdev->ibdev, "CM: wrong cm_node state =%d\n", cm_node->state); break; } irdma_create_event(cm_node, type); } /** * irdma_check_syn - Check for error on received syn ack * @cm_node: connection's node * @tcph: pointer tcp header */ static int irdma_check_syn(struct irdma_cm_node *cm_node, struct tcphdr *tcph) { if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) { irdma_active_open_err(cm_node, true); return 1; } return 0; } /** * irdma_check_seq - check seq numbers if OK * @cm_node: connection's node * @tcph: pointer tcp header */ static int irdma_check_seq(struct irdma_cm_node *cm_node, struct tcphdr *tcph) { u32 seq; u32 ack_seq; u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num; u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt; u32 rcv_wnd; int err = 0; seq = ntohl(tcph->seq); ack_seq = ntohl(tcph->ack_seq); rcv_wnd = cm_node->tcp_cntxt.rcv_wnd; if (ack_seq != loc_seq_num || !between(seq, rcv_nxt, (rcv_nxt + rcv_wnd))) err = -1; if (err) ibdev_dbg(&cm_node->iwdev->ibdev, "CM: seq number err\n"); return err; } void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node) { struct irdma_cm_info nfo; irdma_get_addr_info(cm_node, &nfo); nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id; irdma_manage_qhash(cm_node->iwdev, &nfo, IRDMA_QHASH_TYPE_TCP_ESTABLISHED, IRDMA_QHASH_MANAGE_TYPE_ADD, cm_node, false); cm_node->qhash_set = true; } /** * irdma_handle_syn_pkt - is for Passive node * @cm_node: connection's node * @rbuf: receive buffer */ static void irdma_handle_syn_pkt(struct irdma_cm_node *cm_node, struct irdma_puda_buf *rbuf) { struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; int err; u32 inc_sequence; int optionsize; optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); inc_sequence = ntohl(tcph->seq); switch (cm_node->state) { case IRDMA_CM_STATE_SYN_SENT: case IRDMA_CM_STATE_MPAREQ_SENT: /* Rcvd syn on active open connection */ irdma_active_open_err(cm_node, 1); break; case IRDMA_CM_STATE_LISTENING: /* Passive OPEN */ if (atomic_read(&cm_node->listener->pend_accepts_cnt) > cm_node->listener->backlog) { cm_node->cm_core->stats_backlog_drops++; irdma_passive_open_err(cm_node, false); break; } err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1); if (err) { irdma_passive_open_err(cm_node, false); /* drop pkt */ break; } err = cm_node->cm_core->cm_create_ah(cm_node, false); if (err) { irdma_passive_open_err(cm_node, false); /* drop pkt */ break; } cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1; cm_node->accept_pend = 1; atomic_inc(&cm_node->listener->pend_accepts_cnt); cm_node->state = IRDMA_CM_STATE_SYN_RCVD; break; case IRDMA_CM_STATE_CLOSED: irdma_cleanup_retrans_entry(cm_node); refcount_inc(&cm_node->refcnt); irdma_send_reset(cm_node); break; case IRDMA_CM_STATE_OFFLOADED: case IRDMA_CM_STATE_ESTABLISHED: case IRDMA_CM_STATE_FIN_WAIT1: case IRDMA_CM_STATE_FIN_WAIT2: case IRDMA_CM_STATE_MPAREQ_RCVD: case IRDMA_CM_STATE_LAST_ACK: case IRDMA_CM_STATE_CLOSING: case IRDMA_CM_STATE_UNKNOWN: default: break; } } /** * irdma_handle_synack_pkt - Process SYN+ACK packet (active side) * @cm_node: connection's node * @rbuf: receive buffer */ static void irdma_handle_synack_pkt(struct irdma_cm_node *cm_node, struct irdma_puda_buf *rbuf) { struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; int err; u32 inc_sequence; int optionsize; optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); inc_sequence = ntohl(tcph->seq); switch (cm_node->state) { case 
IRDMA_CM_STATE_SYN_SENT: irdma_cleanup_retrans_entry(cm_node); /* active open */ if (irdma_check_syn(cm_node, tcph)) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: check syn fail\n"); return; } cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); /* setup options */ err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 0); if (err) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: cm_node=%p tcp_options failed\n", cm_node); break; } irdma_cleanup_retrans_entry(cm_node); cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1; irdma_send_ack(cm_node); /* ACK for the syn_ack */ err = irdma_send_mpa_request(cm_node); if (err) { ibdev_dbg(&cm_node->iwdev->ibdev, "CM: cm_node=%p irdma_send_mpa_request failed\n", cm_node); break; } cm_node->state = IRDMA_CM_STATE_MPAREQ_SENT; break; case IRDMA_CM_STATE_MPAREQ_RCVD: irdma_passive_open_err(cm_node, true); break; case IRDMA_CM_STATE_LISTENING: cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); irdma_cleanup_retrans_entry(cm_node); cm_node->state = IRDMA_CM_STATE_CLOSED; irdma_send_reset(cm_node); break; case IRDMA_CM_STATE_CLOSED: cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); irdma_cleanup_retrans_entry(cm_node); refcount_inc(&cm_node->refcnt); irdma_send_reset(cm_node); break; case IRDMA_CM_STATE_ESTABLISHED: case IRDMA_CM_STATE_FIN_WAIT1: case IRDMA_CM_STATE_FIN_WAIT2: case IRDMA_CM_STATE_LAST_ACK: case IRDMA_CM_STATE_OFFLOADED: case IRDMA_CM_STATE_CLOSING: case IRDMA_CM_STATE_UNKNOWN: case IRDMA_CM_STATE_MPAREQ_SENT: default: break; } } /** * irdma_handle_ack_pkt - process packet with ACK * @cm_node: connection's node * @rbuf: receive buffer */ static int irdma_handle_ack_pkt(struct irdma_cm_node *cm_node, struct irdma_puda_buf *rbuf) { struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; u32 inc_sequence; int ret; int optionsize; u32 datasize = rbuf->datalen; optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); if (irdma_check_seq(cm_node, tcph)) return -EINVAL; inc_sequence = ntohl(tcph->seq); switch (cm_node->state) { case IRDMA_CM_STATE_SYN_RCVD: irdma_cleanup_retrans_entry(cm_node); ret = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1); if (ret) return ret; cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); cm_node->state = IRDMA_CM_STATE_ESTABLISHED; if (datasize) { cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; irdma_handle_rcv_mpa(cm_node, rbuf); } break; case IRDMA_CM_STATE_ESTABLISHED: irdma_cleanup_retrans_entry(cm_node); if (datasize) { cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; irdma_handle_rcv_mpa(cm_node, rbuf); } break; case IRDMA_CM_STATE_MPAREQ_SENT: cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); if (datasize) { cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; cm_node->ack_rcvd = false; irdma_handle_rcv_mpa(cm_node, rbuf); } else { cm_node->ack_rcvd = true; } break; case IRDMA_CM_STATE_LISTENING: irdma_cleanup_retrans_entry(cm_node); cm_node->state = IRDMA_CM_STATE_CLOSED; irdma_send_reset(cm_node); break; case IRDMA_CM_STATE_CLOSED: irdma_cleanup_retrans_entry(cm_node); refcount_inc(&cm_node->refcnt); irdma_send_reset(cm_node); break; case IRDMA_CM_STATE_LAST_ACK: case IRDMA_CM_STATE_CLOSING: irdma_cleanup_retrans_entry(cm_node); cm_node->state = IRDMA_CM_STATE_CLOSED; irdma_rem_ref_cm_node(cm_node); break; case IRDMA_CM_STATE_FIN_WAIT1: irdma_cleanup_retrans_entry(cm_node); cm_node->state = IRDMA_CM_STATE_FIN_WAIT2; break; case IRDMA_CM_STATE_SYN_SENT: case IRDMA_CM_STATE_FIN_WAIT2: case IRDMA_CM_STATE_OFFLOADED: case IRDMA_CM_STATE_MPAREQ_RCVD: case IRDMA_CM_STATE_UNKNOWN: default: 
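		/* No state transition for these states; just clear any pending
		 * retransmit entry.
		 */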
irdma_cleanup_retrans_entry(cm_node); break; } return 0; } /** * irdma_process_pkt - process cm packet * @cm_node: connection's node * @rbuf: receive buffer */ static void irdma_process_pkt(struct irdma_cm_node *cm_node, struct irdma_puda_buf *rbuf) { enum irdma_tcpip_pkt_type pkt_type = IRDMA_PKT_TYPE_UNKNOWN; struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; u32 fin_set = 0; int err; if (tcph->rst) { pkt_type = IRDMA_PKT_TYPE_RST; } else if (tcph->syn) { pkt_type = IRDMA_PKT_TYPE_SYN; if (tcph->ack) pkt_type = IRDMA_PKT_TYPE_SYNACK; } else if (tcph->ack) { pkt_type = IRDMA_PKT_TYPE_ACK; } if (tcph->fin) fin_set = 1; switch (pkt_type) { case IRDMA_PKT_TYPE_SYN: irdma_handle_syn_pkt(cm_node, rbuf); break; case IRDMA_PKT_TYPE_SYNACK: irdma_handle_synack_pkt(cm_node, rbuf); break; case IRDMA_PKT_TYPE_ACK: err = irdma_handle_ack_pkt(cm_node, rbuf); if (fin_set && !err) irdma_handle_fin_pkt(cm_node); break; case IRDMA_PKT_TYPE_RST: irdma_handle_rst_pkt(cm_node, rbuf); break; default: if (fin_set && (!irdma_check_seq(cm_node, (struct tcphdr *)rbuf->tcph))) irdma_handle_fin_pkt(cm_node); break; } } /** * irdma_make_listen_node - create a listen node with params * @cm_core: cm's core * @iwdev: iwarp device structure * @cm_info: quad info for connection */ static struct irdma_cm_listener * irdma_make_listen_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev, struct irdma_cm_info *cm_info) { struct irdma_cm_listener *listener; unsigned long flags; /* cannot have multiple matching listeners */ listener = irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4, cm_info->loc_port, cm_info->vlan_id, IRDMA_CM_LISTENER_EITHER_STATE); if (listener && listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) { refcount_dec(&listener->refcnt); return NULL; } if (!listener) { /* create a CM listen node * 1/2 node to compare incoming traffic to */ listener = kzalloc(sizeof(*listener), GFP_KERNEL); if (!listener) return NULL; cm_core->stats_listen_nodes_created++; memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr)); listener->loc_port = cm_info->loc_port; INIT_LIST_HEAD(&listener->child_listen_list); refcount_set(&listener->refcnt, 1); } else { listener->reused_node = 1; } listener->cm_id = cm_info->cm_id; listener->ipv4 = cm_info->ipv4; listener->vlan_id = cm_info->vlan_id; atomic_set(&listener->pend_accepts_cnt, 0); listener->cm_core = cm_core; listener->iwdev = iwdev; listener->backlog = cm_info->backlog; listener->listener_state = IRDMA_CM_LISTENER_ACTIVE_STATE; if (!listener->reused_node) { spin_lock_irqsave(&cm_core->listen_list_lock, flags); list_add(&listener->list, &cm_core->listen_list); spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); } return listener; } /** * irdma_create_cm_node - make a connection node with params * @cm_core: cm's core * @iwdev: iwarp device structure * @conn_param: connection parameters * @cm_info: quad info for connection * @caller_cm_node: pointer to cm_node structure to return */ static int irdma_create_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev, struct iw_cm_conn_param *conn_param, struct irdma_cm_info *cm_info, struct irdma_cm_node **caller_cm_node) { struct irdma_cm_node *cm_node; u16 private_data_len = conn_param->private_data_len; const void *private_data = conn_param->private_data; /* create a CM connection node */ cm_node = irdma_make_cm_node(cm_core, iwdev, cm_info, NULL); if (!cm_node) return -ENOMEM; /* set our node side to client (active) side */ cm_node->tcp_cntxt.client = 1; 
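	/* Fix the receive window scale for the active side and stash the
	 * requested IRD/ORD and private data before the SYN goes out.
	 */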
cm_node->tcp_cntxt.rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE; irdma_record_ird_ord(cm_node, conn_param->ird, conn_param->ord); cm_node->pdata.size = private_data_len; cm_node->pdata.addr = cm_node->pdata_buf; memcpy(cm_node->pdata_buf, private_data, private_data_len); *caller_cm_node = cm_node; return 0; } /** * irdma_cm_reject - reject and teardown a connection * @cm_node: connection's node * @pdata: ptr to private data for reject * @plen: size of private data */ static int irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata, u8 plen) { int ret; int passive_state; if (cm_node->tcp_cntxt.client) return 0; irdma_cleanup_retrans_entry(cm_node); passive_state = atomic_add_return(1, &cm_node->passive_state); if (passive_state == IRDMA_SEND_RESET_EVENT) { cm_node->state = IRDMA_CM_STATE_CLOSED; irdma_rem_ref_cm_node(cm_node); return 0; } if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) { irdma_rem_ref_cm_node(cm_node); return 0; } ret = irdma_send_mpa_reject(cm_node, pdata, plen); if (!ret) return 0; cm_node->state = IRDMA_CM_STATE_CLOSED; if (irdma_send_reset(cm_node)) ibdev_dbg(&cm_node->iwdev->ibdev, "CM: send reset failed\n"); return ret; } /** * irdma_cm_close - close of cm connection * @cm_node: connection's node */ static int irdma_cm_close(struct irdma_cm_node *cm_node) { switch (cm_node->state) { case IRDMA_CM_STATE_SYN_RCVD: case IRDMA_CM_STATE_SYN_SENT: case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED: case IRDMA_CM_STATE_ESTABLISHED: case IRDMA_CM_STATE_ACCEPTING: case IRDMA_CM_STATE_MPAREQ_SENT: case IRDMA_CM_STATE_MPAREQ_RCVD: irdma_cleanup_retrans_entry(cm_node); irdma_send_reset(cm_node); break; case IRDMA_CM_STATE_CLOSE_WAIT: cm_node->state = IRDMA_CM_STATE_LAST_ACK; irdma_send_fin(cm_node); break; case IRDMA_CM_STATE_FIN_WAIT1: case IRDMA_CM_STATE_FIN_WAIT2: case IRDMA_CM_STATE_LAST_ACK: case IRDMA_CM_STATE_TIME_WAIT: case IRDMA_CM_STATE_CLOSING: return -EINVAL; case IRDMA_CM_STATE_LISTENING: irdma_cleanup_retrans_entry(cm_node); irdma_send_reset(cm_node); break; case IRDMA_CM_STATE_MPAREJ_RCVD: case IRDMA_CM_STATE_UNKNOWN: case IRDMA_CM_STATE_INITED: case IRDMA_CM_STATE_CLOSED: case IRDMA_CM_STATE_LISTENER_DESTROYED: irdma_rem_ref_cm_node(cm_node); break; case IRDMA_CM_STATE_OFFLOADED: if (cm_node->send_entry) ibdev_dbg(&cm_node->iwdev->ibdev, "CM: CM send_entry in OFFLOADED state\n"); irdma_rem_ref_cm_node(cm_node); break; } return 0; } /** * irdma_receive_ilq - recv an ETHERNET packet, and process it * through CM * @vsi: VSI structure of dev * @rbuf: receive buffer */ void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf) { struct irdma_cm_node *cm_node; struct irdma_cm_listener *listener; struct iphdr *iph; struct ipv6hdr *ip6h; struct tcphdr *tcph; struct irdma_cm_info cm_info = {}; struct irdma_device *iwdev = vsi->back_vsi; struct irdma_cm_core *cm_core = &iwdev->cm_core; struct vlan_ethhdr *ethh; u16 vtag; /* if vlan, then maclen = 18 else 14 */ iph = (struct iphdr *)rbuf->iph; print_hex_dump_debug("ILQ: RECEIVE ILQ BUFFER", DUMP_PREFIX_OFFSET, 16, 8, rbuf->mem.va, rbuf->totallen, false); if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { if (rbuf->vlan_valid) { vtag = rbuf->vlan_id; cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; cm_info.vlan_id = vtag & VLAN_VID_MASK; } else { cm_info.vlan_id = 0xFFFF; } } else { ethh = rbuf->mem.va; if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) { vtag = ntohs(ethh->h_vlan_TCI); cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; cm_info.vlan_id = vtag & 
VLAN_VID_MASK;
			ibdev_dbg(&cm_core->iwdev->ibdev, "CM: vlan_id=%d\n",
				  cm_info.vlan_id);
		} else {
			cm_info.vlan_id = 0xFFFF;
		}
	}

	tcph = (struct tcphdr *)rbuf->tcph;
	if (rbuf->ipv4) {
		cm_info.loc_addr[0] = ntohl(iph->daddr);
		cm_info.rem_addr[0] = ntohl(iph->saddr);
		cm_info.ipv4 = true;
		cm_info.tos = iph->tos;
	} else {
		ip6h = (struct ipv6hdr *)rbuf->iph;
		irdma_copy_ip_ntohl(cm_info.loc_addr, ip6h->daddr.in6_u.u6_addr32);
		irdma_copy_ip_ntohl(cm_info.rem_addr, ip6h->saddr.in6_u.u6_addr32);
		cm_info.ipv4 = false;
		cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
	}
	cm_info.loc_port = ntohs(tcph->dest);
	cm_info.rem_port = ntohs(tcph->source);
	cm_node = irdma_find_node(cm_core, cm_info.rem_port, cm_info.rem_addr,
				  cm_info.loc_port, cm_info.loc_addr,
				  cm_info.vlan_id);

	if (!cm_node) {
		/* The only packet type accepted here is a SYN for a
		 * passive open; anything else is dropped.
		 */
		if (!tcph->syn || tcph->ack)
			return;

		listener = irdma_find_listener(cm_core, cm_info.loc_addr,
					       cm_info.ipv4, cm_info.loc_port,
					       cm_info.vlan_id,
					       IRDMA_CM_LISTENER_ACTIVE_STATE);
		if (!listener) {
			cm_info.cm_id = NULL;
			ibdev_dbg(&cm_core->iwdev->ibdev, "CM: no listener found\n");
			return;
		}

		cm_info.cm_id = listener->cm_id;
		cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info, listener);
		if (!cm_node) {
			ibdev_dbg(&cm_core->iwdev->ibdev, "CM: allocate node failed\n");
			refcount_dec(&listener->refcnt);
			return;
		}

		if (!tcph->rst && !tcph->fin) {
			cm_node->state = IRDMA_CM_STATE_LISTENING;
		} else {
			irdma_rem_ref_cm_node(cm_node);
			return;
		}

		refcount_inc(&cm_node->refcnt);
	} else if (cm_node->state == IRDMA_CM_STATE_OFFLOADED) {
		irdma_rem_ref_cm_node(cm_node);
		return;
	}

	irdma_process_pkt(cm_node, rbuf);
	irdma_rem_ref_cm_node(cm_node);
}

static int irdma_add_qh(struct irdma_cm_node *cm_node, bool active)
{
	if (!active)
		irdma_add_conn_est_qh(cm_node);

	return 0;
}

static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
{
}

/**
 * irdma_setup_cm_core - setup top level instance of a cm core
 * @iwdev: iwarp device structure
 * @rdma_ver: HW version
 */
int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
{
	struct irdma_cm_core *cm_core = &iwdev->cm_core;

	cm_core->iwdev = iwdev;
	cm_core->dev = &iwdev->rf->sc_dev;

	/* Handles CM event work items sent to the iWARP core */
	cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
	if (!cm_core->event_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&cm_core->listen_list);

	timer_setup(&cm_core->tcp_timer, irdma_cm_timer_tick, 0);

	spin_lock_init(&cm_core->ht_lock);
	spin_lock_init(&cm_core->listen_list_lock);
	spin_lock_init(&cm_core->apbvt_lock);

	switch (rdma_ver) {
	case IRDMA_GEN_1:
		cm_core->form_cm_frame = irdma_form_uda_cm_frame;
		cm_core->cm_create_ah = irdma_add_qh;
		cm_core->cm_free_ah = irdma_cm_free_ah_nop;
		break;
	case IRDMA_GEN_2:
	default:
		cm_core->form_cm_frame = irdma_form_ah_cm_frame;
		cm_core->cm_create_ah = irdma_cm_create_ah;
		cm_core->cm_free_ah = irdma_cm_free_ah;
	}

	return 0;
}

/**
 * irdma_cleanup_cm_core - deallocate a top level instance of a cm core
 * @cm_core: cm's core
 */
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
{
	if (!cm_core)
		return;

	del_timer_sync(&cm_core->tcp_timer);
	destroy_workqueue(cm_core->event_wq);
	cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
}

/**
 * irdma_init_tcp_ctx - setup qp context
 * @cm_node: connection's node
 * @tcp_info: offload info for tcp
 * @iwqp: associated qp for the connection
 */
static void irdma_init_tcp_ctx(struct irdma_cm_node *cm_node,
			       struct irdma_tcp_offload_info *tcp_info,
			       struct irdma_qp *iwqp)
{
	tcp_info->ipv4 = cm_node->ipv4;
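	/* The remaining fields seed the HW TCP offload context from the state
	 * tracked in cm_node->tcp_cntxt during connection setup.
	 */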
tcp_info->drop_ooo_seg = !iwqp->iwdev->iw_ooo; tcp_info->wscale = true; tcp_info->ignore_tcp_opt = true; tcp_info->ignore_tcp_uns_opt = true; tcp_info->no_nagle = false; tcp_info->ttl = IRDMA_DEFAULT_TTL; tcp_info->rtt_var = IRDMA_DEFAULT_RTT_VAR; tcp_info->ss_thresh = IRDMA_DEFAULT_SS_THRESH; tcp_info->rexmit_thresh = IRDMA_DEFAULT_REXMIT_THRESH; tcp_info->tcp_state = IRDMA_TCP_STATE_ESTABLISHED; tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale; tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale; tcp_info->snd_nxt = cm_node->tcp_cntxt.loc_seq_num; tcp_info->snd_wnd = cm_node->tcp_cntxt.snd_wnd; tcp_info->rcv_nxt = cm_node->tcp_cntxt.rcv_nxt; tcp_info->snd_max = cm_node->tcp_cntxt.loc_seq_num; tcp_info->snd_una = cm_node->tcp_cntxt.loc_seq_num; tcp_info->cwnd = 2 * cm_node->tcp_cntxt.mss; tcp_info->snd_wl1 = cm_node->tcp_cntxt.rcv_nxt; tcp_info->snd_wl2 = cm_node->tcp_cntxt.loc_seq_num; tcp_info->max_snd_window = cm_node->tcp_cntxt.max_snd_wnd; tcp_info->rcv_wnd = cm_node->tcp_cntxt.rcv_wnd << cm_node->tcp_cntxt.rcv_wscale; tcp_info->flow_label = 0; tcp_info->snd_mss = (u32)cm_node->tcp_cntxt.mss; tcp_info->tos = cm_node->tos; if (cm_node->vlan_id < VLAN_N_VID) { tcp_info->insert_vlan_tag = true; tcp_info->vlan_tag = cm_node->vlan_id; tcp_info->vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT; } if (cm_node->ipv4) { tcp_info->src_port = cm_node->loc_port; tcp_info->dst_port = cm_node->rem_port; tcp_info->dest_ip_addr[3] = cm_node->rem_addr[0]; tcp_info->local_ipaddr[3] = cm_node->loc_addr[0]; tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf, &tcp_info->dest_ip_addr[3], true, NULL, IRDMA_ARP_RESOLVE); } else { tcp_info->src_port = cm_node->loc_port; tcp_info->dst_port = cm_node->rem_port; memcpy(tcp_info->dest_ip_addr, cm_node->rem_addr, sizeof(tcp_info->dest_ip_addr)); memcpy(tcp_info->local_ipaddr, cm_node->loc_addr, sizeof(tcp_info->local_ipaddr)); tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf, &tcp_info->dest_ip_addr[0], false, NULL, IRDMA_ARP_RESOLVE); } } /** * irdma_cm_init_tsa_conn - setup qp for RTS * @iwqp: associate qp for the connection * @cm_node: connection's node */ static void irdma_cm_init_tsa_conn(struct irdma_qp *iwqp, struct irdma_cm_node *cm_node) { struct irdma_iwarp_offload_info *iwarp_info; struct irdma_qp_host_ctx_info *ctx_info; iwarp_info = &iwqp->iwarp_info; ctx_info = &iwqp->ctx_info; ctx_info->tcp_info = &iwqp->tcp_info; ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; iwarp_info->ord_size = cm_node->ord_size; iwarp_info->ird_size = cm_node->ird_size; iwarp_info->rd_en = true; iwarp_info->rdmap_ver = 1; iwarp_info->ddp_ver = 1; iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id; ctx_info->tcp_info_valid = true; ctx_info->iwarp_info_valid = true; ctx_info->user_pri = cm_node->user_pri; irdma_init_tcp_ctx(cm_node, &iwqp->tcp_info, iwqp); if (cm_node->snd_mark_en) { iwarp_info->snd_mark_en = true; iwarp_info->snd_mark_offset = (iwqp->tcp_info.snd_nxt & SNDMARKER_SEQNMASK) + cm_node->lsmm_size; } cm_node->state = IRDMA_CM_STATE_OFFLOADED; iwqp->tcp_info.tcp_state = IRDMA_TCP_STATE_ESTABLISHED; iwqp->tcp_info.src_mac_addr_idx = iwqp->iwdev->mac_ip_table_idx; if (cm_node->rcv_mark_en) { iwarp_info->rcv_mark_en = true; iwarp_info->align_hdrs = true; } irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); /* once tcp_info is set, no need to do it again */ ctx_info->tcp_info_valid = false; ctx_info->iwarp_info_valid = false; } /** * irdma_cm_disconn - when a connection is being 
closed * @iwqp: associated qp for the connection */ void irdma_cm_disconn(struct irdma_qp *iwqp) { struct irdma_device *iwdev = iwqp->iwdev; struct disconn_work *work; unsigned long flags; work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) return; spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) { spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); ibdev_dbg(&iwdev->ibdev, "CM: qp_id %d is already freed\n", iwqp->ibqp.qp_num); kfree(work); return; } irdma_qp_add_ref(&iwqp->ibqp); spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); work->iwqp = iwqp; INIT_WORK(&work->work, irdma_disconnect_worker); queue_work(iwdev->cleanup_wq, &work->work); } /** * irdma_qp_disconnect - free qp and close cm * @iwqp: associate qp for the connection */ static void irdma_qp_disconnect(struct irdma_qp *iwqp) { struct irdma_device *iwdev = iwqp->iwdev; iwqp->active_conn = 0; /* close the CM node down if it is still active */ ibdev_dbg(&iwdev->ibdev, "CM: Call close API\n"); irdma_cm_close(iwqp->cm_node); } /** * irdma_cm_disconn_true - called by worker thread to disconnect qp * @iwqp: associate qp for the connection */ static void irdma_cm_disconn_true(struct irdma_qp *iwqp) { struct iw_cm_id *cm_id; struct irdma_device *iwdev; struct irdma_sc_qp *qp = &iwqp->sc_qp; u16 last_ae; u8 original_hw_tcp_state; u8 original_ibqp_state; int disconn_status = 0; int issue_disconn = 0; int issue_close = 0; int issue_flush = 0; unsigned long flags; int err; iwdev = iwqp->iwdev; spin_lock_irqsave(&iwqp->lock, flags); if (rdma_protocol_roce(&iwdev->ibdev, 1)) { struct ib_qp_attr attr; if (iwqp->flush_issued || iwqp->sc_qp.qp_uk.destroy_pending) { spin_unlock_irqrestore(&iwqp->lock, flags); return; } spin_unlock_irqrestore(&iwqp->lock, flags); attr.qp_state = IB_QPS_ERR; irdma_modify_qp_roce(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); irdma_ib_qp_event(iwqp, qp->event_type); return; } cm_id = iwqp->cm_id; original_hw_tcp_state = iwqp->hw_tcp_state; original_ibqp_state = iwqp->ibqp_state; last_ae = iwqp->last_aeq; if (qp->term_flags) { issue_disconn = 1; issue_close = 1; iwqp->cm_id = NULL; irdma_terminate_del_timer(qp); if (!iwqp->flush_issued) { iwqp->flush_issued = 1; issue_flush = 1; } } else if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT) || ((original_ibqp_state == IB_QPS_RTS) && (last_ae == IRDMA_AE_LLP_CONNECTION_RESET))) { issue_disconn = 1; if (last_ae == IRDMA_AE_LLP_CONNECTION_RESET) disconn_status = -ECONNRESET; } if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED || original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT || last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE || last_ae == IRDMA_AE_BAD_CLOSE || last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) { issue_close = 1; iwqp->cm_id = NULL; qp->term_flags = 0; if (!iwqp->flush_issued) { iwqp->flush_issued = 1; issue_flush = 1; } } spin_unlock_irqrestore(&iwqp->lock, flags); if (issue_flush && !iwqp->sc_qp.qp_uk.destroy_pending) { irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ | IRDMA_FLUSH_WAIT); if (qp->term_flags) irdma_ib_qp_event(iwqp, qp->event_type); } if (!cm_id || !cm_id->event_handler) return; spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); if (!iwqp->cm_node) { spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); return; } refcount_inc(&iwqp->cm_node->refcnt); spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); if (issue_disconn) { err = irdma_send_cm_event(iwqp->cm_node, cm_id, IW_CM_EVENT_DISCONNECT, disconn_status); if (err) ibdev_dbg(&iwdev->ibdev, 
"CM: disconnect event failed: - cm_id = %p\n", cm_id); } if (issue_close) { cm_id->provider_data = iwqp; err = irdma_send_cm_event(iwqp->cm_node, cm_id, IW_CM_EVENT_CLOSE, 0); if (err) ibdev_dbg(&iwdev->ibdev, "CM: close event failed: - cm_id = %p\n", cm_id); irdma_qp_disconnect(iwqp); } irdma_rem_ref_cm_node(iwqp->cm_node); } /** * irdma_disconnect_worker - worker for connection close * @work: points or disconn structure */ static void irdma_disconnect_worker(struct work_struct *work) { struct disconn_work *dwork = container_of(work, struct disconn_work, work); struct irdma_qp *iwqp = dwork->iwqp; kfree(dwork); irdma_cm_disconn_true(iwqp); irdma_qp_rem_ref(&iwqp->ibqp); } /** * irdma_free_lsmm_rsrc - free lsmm memory and deregister * @iwqp: associate qp for the connection */ void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp) { struct irdma_device *iwdev; iwdev = iwqp->iwdev; if (iwqp->ietf_mem.va) { if (iwqp->lsmm_mr) iwdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr, NULL); dma_free_coherent(iwdev->rf->sc_dev.hw->device, iwqp->ietf_mem.size, iwqp->ietf_mem.va, iwqp->ietf_mem.pa); iwqp->ietf_mem.va = NULL; } } /** * irdma_accept - registered call for connection to be accepted * @cm_id: cm information for passive connection * @conn_param: accpet parameters */ int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct ib_qp *ibqp; struct irdma_qp *iwqp; struct irdma_device *iwdev; struct irdma_sc_dev *dev; struct irdma_cm_node *cm_node; struct ib_qp_attr attr = {}; int passive_state; struct ib_mr *ibmr; struct irdma_pd *iwpd; u16 buf_len = 0; struct irdma_kmem_info accept; u64 tagged_offset; int wait_ret; int ret = 0; ibqp = irdma_get_qp(cm_id->device, conn_param->qpn); if (!ibqp) return -EINVAL; iwqp = to_iwqp(ibqp); iwdev = iwqp->iwdev; dev = &iwdev->rf->sc_dev; cm_node = cm_id->provider_data; if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) { cm_node->ipv4 = true; cm_node->vlan_id = irdma_get_vlan_ipv4(cm_node->loc_addr); } else { cm_node->ipv4 = false; irdma_get_vlan_mac_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL); } ibdev_dbg(&iwdev->ibdev, "CM: Accept vlan_id=%d\n", cm_node->vlan_id); trace_irdma_accept(cm_node, 0, NULL); if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) { ret = -EINVAL; goto error; } passive_state = atomic_add_return(1, &cm_node->passive_state); if (passive_state == IRDMA_SEND_RESET_EVENT) { ret = -ECONNRESET; goto error; } buf_len = conn_param->private_data_len + IRDMA_MAX_IETF_SIZE; iwqp->ietf_mem.size = ALIGN(buf_len, 1); iwqp->ietf_mem.va = dma_alloc_coherent(dev->hw->device, iwqp->ietf_mem.size, &iwqp->ietf_mem.pa, GFP_KERNEL); if (!iwqp->ietf_mem.va) { ret = -ENOMEM; goto error; } cm_node->pdata.size = conn_param->private_data_len; accept.addr = iwqp->ietf_mem.va; accept.size = irdma_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY); memcpy((u8 *)accept.addr + accept.size, conn_param->private_data, conn_param->private_data_len); if (cm_node->dev->ws_add(iwqp->sc_qp.vsi, cm_node->user_pri)) { ret = -ENOMEM; goto error; } iwqp->sc_qp.user_pri = cm_node->user_pri; irdma_qp_add_qos(&iwqp->sc_qp); /* setup our first outgoing iWarp send WQE (the IETF frame response) */ iwpd = iwqp->iwpd; tagged_offset = (uintptr_t)iwqp->ietf_mem.va; ibmr = irdma_reg_phys_mr(&iwpd->ibpd, iwqp->ietf_mem.pa, buf_len, IB_ACCESS_LOCAL_WRITE, &tagged_offset); if (IS_ERR(ibmr)) { ret = -ENOMEM; goto error; } ibmr->pd = &iwpd->ibpd; ibmr->device = iwpd->ibpd.device; iwqp->lsmm_mr = ibmr; if (iwqp->page) iwqp->sc_qp.qp_uk.sq_base = 
kmap_local_page(iwqp->page); cm_node->lsmm_size = accept.size + conn_param->private_data_len; irdma_sc_send_lsmm(&iwqp->sc_qp, iwqp->ietf_mem.va, cm_node->lsmm_size, ibmr->lkey); if (iwqp->page) kunmap_local(iwqp->sc_qp.qp_uk.sq_base); iwqp->cm_id = cm_id; cm_node->cm_id = cm_id; cm_id->provider_data = iwqp; iwqp->active_conn = 0; iwqp->cm_node = cm_node; cm_node->iwqp = iwqp; irdma_cm_init_tsa_conn(iwqp, cm_node); irdma_qp_add_ref(&iwqp->ibqp); cm_id->add_ref(cm_id); attr.qp_state = IB_QPS_RTS; cm_node->qhash_set = false; cm_node->cm_core->cm_free_ah(cm_node); irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) { wait_ret = wait_event_interruptible_timeout(iwqp->waitq, iwqp->rts_ae_rcvd, IRDMA_MAX_TIMEOUT); if (!wait_ret) { ibdev_dbg(&iwdev->ibdev, "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n", cm_node, cm_node->loc_port, cm_node->rem_port, cm_node->cm_id); ret = -ECONNRESET; goto error; } } irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0); cm_node->accelerated = true; complete(&cm_node->establish_comp); if (cm_node->accept_pend) { atomic_dec(&cm_node->listener->pend_accepts_cnt); cm_node->accept_pend = 0; } ibdev_dbg(&iwdev->ibdev, "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n", cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num); cm_node->cm_core->stats_accepts++; return 0; error: irdma_free_lsmm_rsrc(iwqp); irdma_rem_ref_cm_node(cm_node); return ret; } /** * irdma_reject - registered call for connection to be rejected * @cm_id: cm information for passive connection * @pdata: private data to be sent * @pdata_len: private data length */ int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { struct irdma_device *iwdev; struct irdma_cm_node *cm_node; cm_node = cm_id->provider_data; cm_node->pdata.size = pdata_len; trace_irdma_reject(cm_node, 0, NULL); iwdev = to_iwdev(cm_id->device); if (!iwdev) return -EINVAL; cm_node->cm_core->stats_rejects++; if (pdata_len + sizeof(struct ietf_mpa_v2) > IRDMA_MAX_CM_BUF) return -EINVAL; return irdma_cm_reject(cm_node, pdata, pdata_len); } /** * irdma_connect - registered call for connection to be established * @cm_id: cm information for passive connection * @conn_param: Information about the connection */ int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct ib_qp *ibqp; struct irdma_qp *iwqp; struct irdma_device *iwdev; struct irdma_cm_node *cm_node; struct irdma_cm_info cm_info; struct sockaddr_in *laddr; struct sockaddr_in *raddr; struct sockaddr_in6 *laddr6; struct sockaddr_in6 *raddr6; int ret = 0; ibqp = irdma_get_qp(cm_id->device, conn_param->qpn); if (!ibqp) return -EINVAL; iwqp = to_iwqp(ibqp); if (!iwqp) return -EINVAL; iwdev = iwqp->iwdev; if (!iwdev) return -EINVAL; laddr = (struct sockaddr_in *)&cm_id->m_local_addr; raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; if (!(laddr->sin_port) || !(raddr->sin_port)) return -EINVAL; iwqp->active_conn = 1; iwqp->cm_id = NULL; cm_id->provider_data = iwqp; /* set up the connection params for the node */ if (cm_id->remote_addr.ss_family == AF_INET) { if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4) return -EINVAL; cm_info.ipv4 = true; memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr)); memset(cm_info.rem_addr, 0, 
sizeof(cm_info.rem_addr)); cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr); cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr); cm_info.loc_port = ntohs(laddr->sin_port); cm_info.rem_port = ntohs(raddr->sin_port); cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr); } else { if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6) return -EINVAL; cm_info.ipv4 = false; irdma_copy_ip_ntohl(cm_info.loc_addr, laddr6->sin6_addr.in6_u.u6_addr32); irdma_copy_ip_ntohl(cm_info.rem_addr, raddr6->sin6_addr.in6_u.u6_addr32); cm_info.loc_port = ntohs(laddr6->sin6_port); cm_info.rem_port = ntohs(raddr6->sin6_port); irdma_get_vlan_mac_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL); } cm_info.cm_id = cm_id; cm_info.qh_qpid = iwdev->vsi.ilq->qp_id; cm_info.tos = cm_id->tos; if (iwdev->vsi.dscp_mode) { cm_info.user_pri = iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)]; } else { cm_info.user_pri = rt_tos2priority(cm_id->tos); cm_info.user_pri = irdma_iw_get_vlan_prio(cm_info.loc_addr, cm_info.user_pri, cm_info.ipv4); } if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri)) return -ENOMEM; iwqp->sc_qp.user_pri = cm_info.user_pri; irdma_qp_add_qos(&iwqp->sc_qp); ibdev_dbg(&iwdev->ibdev, "DCB: TOS:[%d] UP:[%d]\n", cm_id->tos, cm_info.user_pri); trace_irdma_dcb_tos(iwdev, cm_id->tos, cm_info.user_pri); ret = irdma_create_cm_node(&iwdev->cm_core, iwdev, conn_param, &cm_info, &cm_node); if (ret) return ret; ret = cm_node->cm_core->cm_create_ah(cm_node, true); if (ret) goto err; if (irdma_manage_qhash(iwdev, &cm_info, IRDMA_QHASH_TYPE_TCP_ESTABLISHED, IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true)) { ret = -EINVAL; goto err; } cm_node->qhash_set = true; cm_node->apbvt_entry = irdma_add_apbvt(iwdev, cm_info.loc_port); if (!cm_node->apbvt_entry) { ret = -EINVAL; goto err; } cm_node->apbvt_set = true; iwqp->cm_node = cm_node; cm_node->iwqp = iwqp; iwqp->cm_id = cm_id; irdma_qp_add_ref(&iwqp->ibqp); cm_id->add_ref(cm_id); if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) { cm_node->state = IRDMA_CM_STATE_SYN_SENT; ret = irdma_send_syn(cm_node, 0); if (ret) goto err; } ibdev_dbg(&iwdev->ibdev, "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n", cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num); trace_irdma_connect(cm_node, 0, NULL); return 0; err: if (cm_info.ipv4) ibdev_dbg(&iwdev->ibdev, "CM: connect() FAILED: dest addr=%pI4", cm_info.rem_addr); else ibdev_dbg(&iwdev->ibdev, "CM: connect() FAILED: dest addr=%pI6", cm_info.rem_addr); irdma_rem_ref_cm_node(cm_node); iwdev->cm_core.stats_connect_errs++; return ret; } /** * irdma_create_listen - registered call creating listener * @cm_id: cm information for passive connection * @backlog: to max accept pending count */ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog) { struct irdma_device *iwdev; struct irdma_cm_listener *cm_listen_node; struct irdma_cm_info cm_info = {}; struct sockaddr_in *laddr; struct sockaddr_in6 *laddr6; bool wildcard = false; int err; iwdev = to_iwdev(cm_id->device); if (!iwdev) return -EINVAL; laddr = (struct sockaddr_in *)&cm_id->m_local_addr; laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; cm_info.qh_qpid = iwdev->vsi.ilq->qp_id; if (laddr->sin_family == AF_INET) { if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4) return -EINVAL; cm_info.ipv4 = true; cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr); cm_info.loc_port = ntohs(laddr->sin_port); if (laddr->sin_addr.s_addr != htonl(INADDR_ANY)) { cm_info.vlan_id = 
irdma_get_vlan_ipv4(cm_info.loc_addr); } else { cm_info.vlan_id = 0xFFFF; wildcard = true; } } else { if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6) return -EINVAL; cm_info.ipv4 = false; irdma_copy_ip_ntohl(cm_info.loc_addr, laddr6->sin6_addr.in6_u.u6_addr32); cm_info.loc_port = ntohs(laddr6->sin6_port); if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY) { irdma_get_vlan_mac_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL); } else { cm_info.vlan_id = 0xFFFF; wildcard = true; } } if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode) cm_info.vlan_id = 0; cm_info.backlog = backlog; cm_info.cm_id = cm_id; trace_irdma_create_listen(iwdev, &cm_info); cm_listen_node = irdma_make_listen_node(&iwdev->cm_core, iwdev, &cm_info); if (!cm_listen_node) { ibdev_dbg(&iwdev->ibdev, "CM: cm_listen_node == NULL\n"); return -ENOMEM; } cm_id->provider_data = cm_listen_node; cm_listen_node->tos = cm_id->tos; if (iwdev->vsi.dscp_mode) cm_listen_node->user_pri = iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)]; else cm_listen_node->user_pri = rt_tos2priority(cm_id->tos); cm_info.user_pri = cm_listen_node->user_pri; if (!cm_listen_node->reused_node) { if (wildcard) { err = irdma_add_mqh(iwdev, &cm_info, cm_listen_node); if (err) goto error; } else { if (!iwdev->vsi.dscp_mode) cm_listen_node->user_pri = irdma_iw_get_vlan_prio(cm_info.loc_addr, cm_info.user_pri, cm_info.ipv4); cm_info.user_pri = cm_listen_node->user_pri; err = irdma_manage_qhash(iwdev, &cm_info, IRDMA_QHASH_TYPE_TCP_SYN, IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true); if (err) goto error; cm_listen_node->qhash_set = true; } cm_listen_node->apbvt_entry = irdma_add_apbvt(iwdev, cm_info.loc_port); if (!cm_listen_node->apbvt_entry) goto error; } cm_id->add_ref(cm_id); cm_listen_node->cm_core->stats_listen_created++; ibdev_dbg(&iwdev->ibdev, "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d\n", cm_listen_node->loc_port, cm_listen_node->loc_addr, cm_listen_node, cm_listen_node->cm_id, cm_listen_node->qhash_set, cm_listen_node->vlan_id); return 0; error: irdma_cm_del_listen(&iwdev->cm_core, cm_listen_node, false); return -EINVAL; } /** * irdma_destroy_listen - registered call to destroy listener * @cm_id: cm information for passive connection */ int irdma_destroy_listen(struct iw_cm_id *cm_id) { struct irdma_device *iwdev; iwdev = to_iwdev(cm_id->device); if (cm_id->provider_data) irdma_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true); else ibdev_dbg(&iwdev->ibdev, "CM: cm_id->provider_data was NULL\n"); cm_id->rem_ref(cm_id); return 0; } /** * irdma_teardown_list_prep - add conn nodes slated for tear down to list * @cm_core: cm's core * @teardown_list: a list to which cm_node will be selected * @ipaddr: pointer to ip address * @nfo: pointer to cm_info structure instance * @disconnect_all: flag indicating disconnect all QPs */ static void irdma_teardown_list_prep(struct irdma_cm_core *cm_core, struct list_head *teardown_list, u32 *ipaddr, struct irdma_cm_info *nfo, bool disconnect_all) { struct irdma_cm_node *cm_node; int bkt; hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) { if ((disconnect_all || (nfo->vlan_id == cm_node->vlan_id && !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 
4 : 16))) && refcount_inc_not_zero(&cm_node->refcnt)) list_add(&cm_node->teardown_entry, teardown_list); } } /** * irdma_cm_event_connected - handle connected active node * @event: the info for cm_node of connection */ static void irdma_cm_event_connected(struct irdma_cm_event *event) { struct irdma_qp *iwqp; struct irdma_device *iwdev; struct irdma_cm_node *cm_node; struct irdma_sc_dev *dev; struct ib_qp_attr attr = {}; struct iw_cm_id *cm_id; int status; bool read0; int wait_ret = 0; cm_node = event->cm_node; cm_id = cm_node->cm_id; iwqp = cm_id->provider_data; iwdev = iwqp->iwdev; dev = &iwdev->rf->sc_dev; if (iwqp->sc_qp.qp_uk.destroy_pending) { status = -ETIMEDOUT; goto error; } irdma_cm_init_tsa_conn(iwqp, cm_node); read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO); if (iwqp->page) iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page); irdma_sc_send_rtt(&iwqp->sc_qp, read0); if (iwqp->page) kunmap_local(iwqp->sc_qp.qp_uk.sq_base); attr.qp_state = IB_QPS_RTS; cm_node->qhash_set = false; irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) { wait_ret = wait_event_interruptible_timeout(iwqp->waitq, iwqp->rts_ae_rcvd, IRDMA_MAX_TIMEOUT); if (!wait_ret) ibdev_dbg(&iwdev->ibdev, "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n", cm_node, cm_node->loc_port, cm_node->rem_port, cm_node->cm_id); } irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0); cm_node->accelerated = true; complete(&cm_node->establish_comp); cm_node->cm_core->cm_free_ah(cm_node); return; error: iwqp->cm_id = NULL; cm_id->provider_data = NULL; irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, status); irdma_rem_ref_cm_node(event->cm_node); } /** * irdma_cm_event_reset - handle reset * @event: the info for cm_node of connection */ static void irdma_cm_event_reset(struct irdma_cm_event *event) { struct irdma_cm_node *cm_node = event->cm_node; struct iw_cm_id *cm_id = cm_node->cm_id; struct irdma_qp *iwqp; if (!cm_id) return; iwqp = cm_id->provider_data; if (!iwqp) return; ibdev_dbg(&cm_node->iwdev->ibdev, "CM: reset event %p - cm_id = %p\n", event->cm_node, cm_id); iwqp->cm_id = NULL; irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET); irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0); } /** * irdma_cm_event_handler - send event to cm upper layer * @work: pointer of cm event info. 
*/ static void irdma_cm_event_handler(struct work_struct *work) { struct irdma_cm_event *event = container_of(work, struct irdma_cm_event, event_work); struct irdma_cm_node *cm_node; if (!event || !event->cm_node || !event->cm_node->cm_core) return; cm_node = event->cm_node; trace_irdma_cm_event_handler(cm_node, event->type, NULL); switch (event->type) { case IRDMA_CM_EVENT_MPA_REQ: irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CONNECT_REQUEST, 0); break; case IRDMA_CM_EVENT_RESET: irdma_cm_event_reset(event); break; case IRDMA_CM_EVENT_CONNECTED: if (!event->cm_node->cm_id || event->cm_node->state != IRDMA_CM_STATE_OFFLOADED) break; irdma_cm_event_connected(event); break; case IRDMA_CM_EVENT_MPA_REJECT: if (!event->cm_node->cm_id || cm_node->state == IRDMA_CM_STATE_OFFLOADED) break; irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED); break; case IRDMA_CM_EVENT_ABORTED: if (!event->cm_node->cm_id || event->cm_node->state == IRDMA_CM_STATE_OFFLOADED) break; irdma_event_connect_error(event); break; default: ibdev_dbg(&cm_node->iwdev->ibdev, "CM: bad event type = %d\n", event->type); break; } irdma_rem_ref_cm_node(event->cm_node); kfree(event); } /** * irdma_cm_post_event - queue event request for worker thread * @event: cm node's info for up event call */ static void irdma_cm_post_event(struct irdma_cm_event *event) { refcount_inc(&event->cm_node->refcnt); INIT_WORK(&event->event_work, irdma_cm_event_handler); queue_work(event->cm_node->cm_core->event_wq, &event->event_work); } /** * irdma_cm_teardown_connections - teardown QPs * @iwdev: device pointer * @ipaddr: Pointer to IPv4 or IPv6 address * @nfo: Connection info * @disconnect_all: flag indicating disconnect all QPs * * teardown QPs where source or destination addr matches ip addr */ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr, struct irdma_cm_info *nfo, bool disconnect_all) { struct irdma_cm_core *cm_core = &iwdev->cm_core; struct list_head *list_core_temp; struct list_head *list_node; struct irdma_cm_node *cm_node; struct list_head teardown_list; struct ib_qp_attr attr; INIT_LIST_HEAD(&teardown_list); rcu_read_lock(); irdma_teardown_list_prep(cm_core, &teardown_list, ipaddr, nfo, disconnect_all); rcu_read_unlock(); list_for_each_safe (list_node, list_core_temp, &teardown_list) { cm_node = container_of(list_node, struct irdma_cm_node, teardown_entry); attr.qp_state = IB_QPS_ERR; irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL); if (iwdev->rf->reset) irdma_cm_disconn(cm_node->iwqp); irdma_rem_ref_cm_node(cm_node); } } /** * irdma_qhash_ctrl - enable/disable qhash for list * @iwdev: device pointer * @parent_listen_node: parent listen node * @nfo: cm info node * @ipaddr: Pointer to IPv4 or IPv6 address * @ipv4: flag indicating IPv4 when true * @ifup: flag indicating interface up when true * * Enables or disables the qhash for the node in the child * listen list that matches ipaddr. If no matching IP was found * it will allocate and add a new child listen node to the * parent listen node. The listen_list_lock is assumed to be * held when called. */ static void irdma_qhash_ctrl(struct irdma_device *iwdev, struct irdma_cm_listener *parent_listen_node, struct irdma_cm_info *nfo, u32 *ipaddr, bool ipv4, bool ifup) { struct list_head *child_listen_list = &parent_listen_node->child_listen_list; struct irdma_cm_listener *child_listen_node; struct list_head *pos, *tpos; bool node_allocated = false; enum irdma_quad_hash_manage_type op = ifup ? 
IRDMA_QHASH_MANAGE_TYPE_ADD : IRDMA_QHASH_MANAGE_TYPE_DELETE; int err; list_for_each_safe (pos, tpos, child_listen_list) { child_listen_node = list_entry(pos, struct irdma_cm_listener, child_listen_list); if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16)) goto set_qhash; } /* if not found then add a child listener if interface is going up */ if (!ifup) return; child_listen_node = kmemdup(parent_listen_node, sizeof(*child_listen_node), GFP_ATOMIC); if (!child_listen_node) return; node_allocated = true; memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16); set_qhash: memcpy(nfo->loc_addr, child_listen_node->loc_addr, sizeof(nfo->loc_addr)); nfo->vlan_id = child_listen_node->vlan_id; err = irdma_manage_qhash(iwdev, nfo, IRDMA_QHASH_TYPE_TCP_SYN, op, NULL, false); if (!err) { child_listen_node->qhash_set = ifup; if (node_allocated) list_add(&child_listen_node->child_listen_list, &parent_listen_node->child_listen_list); } else if (node_allocated) { kfree(child_listen_node); } } /** * irdma_if_notify - process an ifdown on an interface * @iwdev: device pointer * @netdev: network device structure * @ipaddr: Pointer to IPv4 or IPv6 address * @ipv4: flag indicating IPv4 when true * @ifup: flag indicating interface up when true */ void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev, u32 *ipaddr, bool ipv4, bool ifup) { struct irdma_cm_core *cm_core = &iwdev->cm_core; unsigned long flags; struct irdma_cm_listener *listen_node; static const u32 ip_zero[4] = { 0, 0, 0, 0 }; struct irdma_cm_info nfo = {}; u16 vlan_id = rdma_vlan_dev_vlan_id(netdev); enum irdma_quad_hash_manage_type op = ifup ? IRDMA_QHASH_MANAGE_TYPE_ADD : IRDMA_QHASH_MANAGE_TYPE_DELETE; nfo.vlan_id = vlan_id; nfo.ipv4 = ipv4; nfo.qh_qpid = 1; /* Disable or enable qhash for listeners */ spin_lock_irqsave(&cm_core->listen_list_lock, flags); list_for_each_entry (listen_node, &cm_core->listen_list, list) { if (vlan_id != listen_node->vlan_id || (memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) && memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) continue; memcpy(nfo.loc_addr, listen_node->loc_addr, sizeof(nfo.loc_addr)); nfo.loc_port = listen_node->loc_port; nfo.user_pri = listen_node->user_pri; if (!list_empty(&listen_node->child_listen_list)) { irdma_qhash_ctrl(iwdev, listen_node, &nfo, ipaddr, ipv4, ifup); } else if (memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16)) { if (!irdma_manage_qhash(iwdev, &nfo, IRDMA_QHASH_TYPE_TCP_SYN, op, NULL, false)) listen_node->qhash_set = ifup; } } spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); /* disconnect any connected qp's on ifdown */ if (!ifup) irdma_cm_teardown_connections(iwdev, ipaddr, &nfo, false); }
linux-master
drivers/infiniband/hw/irdma/cm.c
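/*
 * Illustrative sketch, not part of the driver file above: the listener,
 * teardown, and ifdown paths in irdma/cm.c all compare IP addresses as raw
 * 32-bit word arrays, matching 4 bytes for IPv4 and 16 bytes for IPv6
 * (the recurring "ipv4 ? 4 : 16" memcmp). The standalone helper below mirrors
 * only that convention; the names addr_match and loc_addr and the sample
 * addresses are hypothetical and exist for this example only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool addr_match(const uint32_t *loc_addr, const uint32_t *ipaddr, bool ipv4)
{
	/* IPv4 occupies only the first word; IPv6 uses all four words. */
	return !memcmp(loc_addr, ipaddr, ipv4 ? 4 : 16);
}

int main(void)
{
	uint32_t listener[4] = { 0x0100007f, 0, 0, 0 };	/* sample IPv4-style address */
	uint32_t peer[4]     = { 0x0100007f, 1, 2, 3 };	/* same first word only */

	/* Matches when compared as IPv4 (4 bytes), differs as IPv6 (16 bytes). */
	printf("ipv4 match: %d, ipv6 match: %d\n",
	       addr_match(listener, peer, true),
	       addr_match(listener, peer, false));
	return 0;
}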
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "hmc.h" #include "defs.h" #include "type.h" #include "protos.h" /** * irdma_find_sd_index_limit - finds segment descriptor index limit * @hmc_info: pointer to the HMC configuration information structure * @type: type of HMC resources we're searching * @idx: starting index for the object * @cnt: number of objects we're trying to create * @sd_idx: pointer to return index of the segment descriptor in question * @sd_limit: pointer to return the maximum number of segment descriptors * * This function calculates the segment descriptor index and index limit * for the resource defined by irdma_hmc_rsrc_type. */ static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type, u32 idx, u32 cnt, u32 *sd_idx, u32 *sd_limit) { u64 fpm_addr, fpm_limit; fpm_addr = hmc_info->hmc_obj[(type)].base + hmc_info->hmc_obj[type].size * idx; fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt; *sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE); *sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE); *sd_limit += 1; } /** * irdma_find_pd_index_limit - finds page descriptor index limit * @hmc_info: pointer to the HMC configuration information struct * @type: HMC resource type we're examining * @idx: starting index for the object * @cnt: number of objects we're trying to create * @pd_idx: pointer to return page descriptor index * @pd_limit: pointer to return page descriptor index limit * * Calculates the page descriptor index and index limit for the resource * defined by irdma_hmc_rsrc_type. */ static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type, u32 idx, u32 cnt, u32 *pd_idx, u32 *pd_limit) { u64 fpm_adr, fpm_limit; fpm_adr = hmc_info->hmc_obj[type].base + hmc_info->hmc_obj[type].size * idx; fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); *pd_idx = (u32)(fpm_adr / IRDMA_HMC_PAGED_BP_SIZE); *pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE); *pd_limit += 1; } /** * irdma_set_sd_entry - setup entry for sd programming * @pa: physical addr * @idx: sd index * @type: paged or direct sd * @entry: sd entry ptr */ static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type, struct irdma_update_sd_entry *entry) { entry->data = pa | FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) | FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE, type == IRDMA_SD_TYPE_PAGED ? 0 : 1) | FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1); entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15); } /** * irdma_clr_sd_entry - setup entry for sd clear * @idx: sd index * @type: paged or direct sd * @entry: sd entry ptr */ static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type, struct irdma_update_sd_entry *entry) { entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) | FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE, type == IRDMA_SD_TYPE_PAGED ? 
0 : 1); entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15); } /** * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF * @dev: pointer to our device struct * @sd_idx: segment descriptor index * @pd_idx: page descriptor index */ static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx, u32 pd_idx) { u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) | FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) | FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx); writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]); } /** * irdma_hmc_sd_one - setup 1 sd entry for cqp * @dev: pointer to the device structure * @hmc_fn_id: hmc's function id * @pa: physical addr * @sd_idx: sd index * @type: paged or direct sd * @setsd: flag to set or clear sd */ int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx, enum irdma_sd_entry_type type, bool setsd) { struct irdma_update_sds_info sdinfo; sdinfo.cnt = 1; sdinfo.hmc_fn_id = hmc_fn_id; if (setsd) irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry); else irdma_clr_sd_entry(sd_idx, type, sdinfo.entry); return dev->cqp->process_cqp_sds(dev, &sdinfo); } /** * irdma_hmc_sd_grp - setup group of sd entries for cqp * @dev: pointer to the device structure * @hmc_info: pointer to the HMC configuration information struct * @sd_index: sd index * @sd_cnt: number of sd entries * @setsd: flag to set or clear sd */ static int irdma_hmc_sd_grp(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info, u32 sd_index, u32 sd_cnt, bool setsd) { struct irdma_hmc_sd_entry *sd_entry; struct irdma_update_sds_info sdinfo = {}; u64 pa; u32 i; int ret_code = 0; sdinfo.hmc_fn_id = hmc_info->hmc_fn_id; for (i = sd_index; i < sd_index + sd_cnt; i++) { sd_entry = &hmc_info->sd_table.sd_entry[i]; if (!sd_entry || (!sd_entry->valid && setsd) || (sd_entry->valid && !setsd)) continue; if (setsd) { pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ? sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa; irdma_set_sd_entry(pa, i, sd_entry->entry_type, &sdinfo.entry[sdinfo.cnt]); } else { irdma_clr_sd_entry(i, sd_entry->entry_type, &sdinfo.entry[sdinfo.cnt]); } sdinfo.cnt++; if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) { ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo); if (ret_code) { ibdev_dbg(to_ibdev(dev), "HMC: sd_programming failed err=%d\n", ret_code); return ret_code; } sdinfo.cnt = 0; } } if (sdinfo.cnt) ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo); return ret_code; } /** * irdma_hmc_finish_add_sd_reg - program sd entries for objects * @dev: pointer to the device structure * @info: create obj info */ static int irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev, struct irdma_hmc_create_obj_info *info) { if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) return -EINVAL; if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) return -EINVAL; if (!info->add_sd_cnt) return 0; return irdma_hmc_sd_grp(dev, info->hmc_info, info->hmc_info->sd_indexes[0], info->add_sd_cnt, true); } /** * irdma_sc_create_hmc_obj - allocate backing store for hmc objects * @dev: pointer to the device structure * @info: pointer to irdma_hmc_create_obj_info struct * * This will allocate memory for PDs and backing pages and populate * the sd and pd entries. 
*/ int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev, struct irdma_hmc_create_obj_info *info) { struct irdma_hmc_sd_entry *sd_entry; u32 sd_idx, sd_lmt; u32 pd_idx = 0, pd_lmt = 0; u32 pd_idx1 = 0, pd_lmt1 = 0; u32 i, j; bool pd_error = false; int ret_code = 0; if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) return -EINVAL; if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ibdev_dbg(to_ibdev(dev), "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n", info->rsrc_type, info->start_idx, info->count, info->hmc_info->hmc_obj[info->rsrc_type].cnt); return -EINVAL; } irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &sd_idx, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { return -EINVAL; } irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &pd_idx, &pd_lmt); for (j = sd_idx; j < sd_lmt; j++) { ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j, info->entry_type, IRDMA_HMC_DIRECT_BP_SIZE); if (ret_code) goto exit_sd_error; sd_entry = &info->hmc_info->sd_table.sd_entry[j]; if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED && (dev->hmc_info == info->hmc_info && info->rsrc_type != IRDMA_HMC_IW_PBLE)) { pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT)); pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT); for (i = pd_idx1; i < pd_lmt1; i++) { /* update the pd table entry */ ret_code = irdma_add_pd_table_entry(dev, info->hmc_info, i, NULL); if (ret_code) { pd_error = true; break; } } if (pd_error) { while (i && (i > pd_idx1)) { irdma_remove_pd_bp(dev, info->hmc_info, i - 1); i--; } } } if (sd_entry->valid) continue; info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j; info->add_sd_cnt++; sd_entry->valid = true; } return irdma_hmc_finish_add_sd_reg(dev, info); exit_sd_error: while (j && (j > sd_idx)) { sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1]; switch (sd_entry->entry_type) { case IRDMA_SD_TYPE_PAGED: pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT); pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT)); for (i = pd_idx1; i < pd_lmt1; i++) irdma_prep_remove_pd_page(info->hmc_info, i); break; case IRDMA_SD_TYPE_DIRECT: irdma_prep_remove_pd_page(info->hmc_info, (j - 1)); break; default: ret_code = -EINVAL; break; } j--; } return ret_code; } /** * irdma_finish_del_sd_reg - delete sd entries for objects * @dev: pointer to the device structure * @info: dele obj info * @reset: true if called before reset */ static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev, struct irdma_hmc_del_obj_info *info, bool reset) { struct irdma_hmc_sd_entry *sd_entry; int ret_code = 0; u32 i, sd_idx; struct irdma_dma_mem *mem; if (!reset) ret_code = irdma_hmc_sd_grp(dev, info->hmc_info, info->hmc_info->sd_indexes[0], info->del_sd_cnt, false); if (ret_code) ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n"); for (i = 0; i < info->del_sd_cnt; i++) { sd_idx = info->hmc_info->sd_indexes[i]; sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx]; mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ? 
&sd_entry->u.pd_table.pd_page_addr : &sd_entry->u.bp.addr; if (!mem || !mem->va) { ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n"); } else { dma_free_coherent(dev->hw->device, mem->size, mem->va, mem->pa); mem->va = NULL; } } return ret_code; } /** * irdma_sc_del_hmc_obj - remove pe hmc objects * @dev: pointer to the device structure * @info: pointer to irdma_hmc_del_obj_info struct * @reset: true if called before reset * * This will de-populate the SDs and PDs. It frees * the memory for PDS and backing storage. After this function is returned, * caller should deallocate memory allocated previously for * book-keeping information about PDs and backing storage. */ int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev, struct irdma_hmc_del_obj_info *info, bool reset) { struct irdma_hmc_pd_table *pd_table; u32 sd_idx, sd_lmt; u32 pd_idx, pd_lmt, rel_pd_idx; u32 i, j; int ret_code = 0; if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ibdev_dbg(to_ibdev(dev), "HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n", info->start_idx, info->rsrc_type, info->hmc_info->hmc_obj[info->rsrc_type].cnt); return -EINVAL; } if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ibdev_dbg(to_ibdev(dev), "HMC: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n", info->start_idx, info->count, info->rsrc_type, info->hmc_info->hmc_obj[info->rsrc_type].cnt); return -EINVAL; } irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &pd_idx, &pd_lmt); for (j = pd_idx; j < pd_lmt; j++) { sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD; if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid) continue; if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type != IRDMA_SD_TYPE_PAGED) continue; rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD; pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; if (pd_table->pd_entry && pd_table->pd_entry[rel_pd_idx].valid) { ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j); if (ret_code) { ibdev_dbg(to_ibdev(dev), "HMC: remove_pd_bp error\n"); return ret_code; } } } irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &sd_idx, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n"); return -EINVAL; } for (i = sd_idx; i < sd_lmt; i++) { pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table; if (!info->hmc_info->sd_table.sd_entry[i].valid) continue; switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { case IRDMA_SD_TYPE_DIRECT: ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i); if (!ret_code) { info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i; info->del_sd_cnt++; } break; case IRDMA_SD_TYPE_PAGED: ret_code = irdma_prep_remove_pd_page(info->hmc_info, i); if (ret_code) break; if (dev->hmc_info != info->hmc_info && info->rsrc_type == IRDMA_HMC_IW_PBLE && pd_table->pd_entry) { kfree(pd_table->pd_entry_virt_mem.va); pd_table->pd_entry = NULL; } info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i; info->del_sd_cnt++; break; default: break; } } return irdma_finish_del_sd_reg(dev, info, reset); } /** * irdma_add_sd_table_entry - Adds a segment descriptor to the table * @hw: pointer to our hw struct * @hmc_info: pointer to the HMC configuration information struct * @sd_index: segment descriptor index to manipulate * @type: what type of segment descriptor we're manipulating * @direct_mode_sz: size to alloc in direct mode */ int 
irdma_add_sd_table_entry(struct irdma_hw *hw, struct irdma_hmc_info *hmc_info, u32 sd_index, enum irdma_sd_entry_type type, u64 direct_mode_sz) { struct irdma_hmc_sd_entry *sd_entry; struct irdma_dma_mem dma_mem; u64 alloc_len; sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; if (!sd_entry->valid) { if (type == IRDMA_SD_TYPE_PAGED) alloc_len = IRDMA_HMC_PAGED_BP_SIZE; else alloc_len = direct_mode_sz; /* allocate a 4K pd page or 2M backing page */ dma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT); dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size, &dma_mem.pa, GFP_KERNEL); if (!dma_mem.va) return -ENOMEM; if (type == IRDMA_SD_TYPE_PAGED) { struct irdma_virt_mem *vmem = &sd_entry->u.pd_table.pd_entry_virt_mem; vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512; vmem->va = kzalloc(vmem->size, GFP_KERNEL); if (!vmem->va) { dma_free_coherent(hw->device, dma_mem.size, dma_mem.va, dma_mem.pa); dma_mem.va = NULL; return -ENOMEM; } sd_entry->u.pd_table.pd_entry = vmem->va; memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem, sizeof(sd_entry->u.pd_table.pd_page_addr)); } else { memcpy(&sd_entry->u.bp.addr, &dma_mem, sizeof(sd_entry->u.bp.addr)); sd_entry->u.bp.sd_pd_index = sd_index; } hmc_info->sd_table.sd_entry[sd_index].entry_type = type; hmc_info->sd_table.use_cnt++; } if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT) sd_entry->u.bp.use_cnt++; return 0; } /** * irdma_add_pd_table_entry - Adds page descriptor to the specified table * @dev: pointer to our device structure * @hmc_info: pointer to the HMC configuration information structure * @pd_index: which page descriptor index to manipulate * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. * * This function: * 1. Initializes the pd entry * 2. Adds pd_entry in the pd_table * 3. Mark the entry valid in irdma_hmc_pd_entry structure * 4. Initializes the pd_entry's ref count to 1 * assumptions: * 1. The memory for pd should be pinned down, physically contiguous and * aligned on 4K boundary and zeroed memory. * 2. It should be 4K in size. 
*/ int irdma_add_pd_table_entry(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info, u32 pd_index, struct irdma_dma_mem *rsrc_pg) { struct irdma_hmc_pd_table *pd_table; struct irdma_hmc_pd_entry *pd_entry; struct irdma_dma_mem mem; struct irdma_dma_mem *page = &mem; u32 sd_idx, rel_pd_idx; u64 *pd_addr; u64 page_desc; if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) return -EINVAL; sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD); if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != IRDMA_SD_TYPE_PAGED) return 0; rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD); pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; if (!pd_entry->valid) { if (rsrc_pg) { pd_entry->rsrc_pg = true; page = rsrc_pg; } else { page->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE, IRDMA_HMC_PD_BP_BUF_ALIGNMENT); page->va = dma_alloc_coherent(dev->hw->device, page->size, &page->pa, GFP_KERNEL); if (!page->va) return -ENOMEM; pd_entry->rsrc_pg = false; } memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr)); pd_entry->bp.sd_pd_index = pd_index; pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED; page_desc = page->pa | 0x1; pd_addr = pd_table->pd_page_addr.va; pd_addr += rel_pd_idx; memcpy(pd_addr, &page_desc, sizeof(*pd_addr)); pd_entry->sd_index = sd_idx; pd_entry->valid = true; pd_table->use_cnt++; irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx); } pd_entry->bp.use_cnt++; return 0; } /** * irdma_remove_pd_bp - remove a backing page from a page descriptor * @dev: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index * * This function: * 1. Marks the entry in pd table (for paged address mode) or in sd table * (for direct address mode) invalid. * 2. Write to register PMPDINV to invalidate the backing page in FV cache * 3. Decrement the ref count for the pd _entry * assumptions: * 1. Caller can deallocate the memory used by backing storage after this * function returns. 
*/ int irdma_remove_pd_bp(struct irdma_sc_dev *dev, struct irdma_hmc_info *hmc_info, u32 idx) { struct irdma_hmc_pd_entry *pd_entry; struct irdma_hmc_pd_table *pd_table; struct irdma_hmc_sd_entry *sd_entry; u32 sd_idx, rel_pd_idx; struct irdma_dma_mem *mem; u64 *pd_addr; sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD; rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD; if (sd_idx >= hmc_info->sd_table.sd_cnt) return -EINVAL; sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED) return -EINVAL; pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; if (--pd_entry->bp.use_cnt) return 0; pd_entry->valid = false; pd_table->use_cnt--; pd_addr = pd_table->pd_page_addr.va; pd_addr += rel_pd_idx; memset(pd_addr, 0, sizeof(u64)); irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx); if (!pd_entry->rsrc_pg) { mem = &pd_entry->bp.addr; if (!mem || !mem->va) return -EINVAL; dma_free_coherent(dev->hw->device, mem->size, mem->va, mem->pa); mem->va = NULL; } if (!pd_table->use_cnt) kfree(pd_table->pd_entry_virt_mem.va); return 0; } /** * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index */ int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx) { struct irdma_hmc_sd_entry *sd_entry; sd_entry = &hmc_info->sd_table.sd_entry[idx]; if (--sd_entry->u.bp.use_cnt) return -EBUSY; hmc_info->sd_table.use_cnt--; sd_entry->valid = false; return 0; } /** * irdma_prep_remove_pd_page - Prepares to remove a PD page from sd entry. * @hmc_info: pointer to the HMC configuration information structure * @idx: segment descriptor index to find the relevant page descriptor */ int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx) { struct irdma_hmc_sd_entry *sd_entry; sd_entry = &hmc_info->sd_table.sd_entry[idx]; if (sd_entry->u.pd_table.use_cnt) return -EBUSY; sd_entry->valid = false; hmc_info->sd_table.use_cnt--; return 0; }
linux-master
drivers/infiniband/hw/irdma/hmc.c
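/*
 * Illustrative sketch, not part of the driver file above:
 * irdma_find_sd_index_limit() and irdma_find_pd_index_limit() in irdma/hmc.c
 * both map an FPM byte range [base + size*idx, base + size*(idx+cnt)) onto
 * descriptor indexes by dividing by the backing-page size and rounding the
 * limit up to an exclusive bound. The standalone function below reproduces
 * that arithmetic only; BP_SIZE and the variable names are hypothetical
 * placeholders, not the driver's constants.
 */
#include <stdint.h>
#include <stdio.h>

#define BP_SIZE (2ULL * 1024 * 1024)	/* placeholder backing-page size */

static void find_index_limit(uint64_t base, uint64_t obj_size, uint32_t idx,
			     uint32_t cnt, uint32_t *first, uint32_t *limit)
{
	uint64_t fpm_addr = base + obj_size * idx;
	uint64_t fpm_limit = fpm_addr + obj_size * cnt;

	*first = (uint32_t)(fpm_addr / BP_SIZE);
	/* Exclusive limit: index of the last byte touched, plus one. */
	*limit = (uint32_t)((fpm_limit - 1) / BP_SIZE) + 1;
}

int main(void)
{
	uint32_t first, limit;

	/* hypothetical object layout: base 1 MiB, 4 KiB objects, 1000 of them */
	find_index_limit(0x100000, 4096, 10, 1000, &first, &limit);
	printf("first=%u limit=%u\n", first, limit);
	return 0;
}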
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved. */ #include "efa_com.h" #include "efa_regs_defs.h" #define ADMIN_CMD_TIMEOUT_US 30000000 /* usecs */ #define EFA_REG_READ_TIMEOUT_US 50000 /* usecs */ #define EFA_MMIO_READ_INVALID 0xffffffff #define EFA_POLL_INTERVAL_MS 100 /* msecs */ #define EFA_ASYNC_QUEUE_DEPTH 16 #define EFA_ADMIN_QUEUE_DEPTH 32 #define EFA_CTRL_MAJOR 0 #define EFA_CTRL_MINOR 0 #define EFA_CTRL_SUB_MINOR 1 enum efa_cmd_status { EFA_CMD_SUBMITTED, EFA_CMD_COMPLETED, }; struct efa_comp_ctx { struct completion wait_event; struct efa_admin_acq_entry *user_cqe; u32 comp_size; enum efa_cmd_status status; u8 cmd_opcode; u8 occupied; }; static const char *efa_com_cmd_str(u8 cmd) { #define EFA_CMD_STR_CASE(_cmd) case EFA_ADMIN_##_cmd: return #_cmd switch (cmd) { EFA_CMD_STR_CASE(CREATE_QP); EFA_CMD_STR_CASE(MODIFY_QP); EFA_CMD_STR_CASE(QUERY_QP); EFA_CMD_STR_CASE(DESTROY_QP); EFA_CMD_STR_CASE(CREATE_AH); EFA_CMD_STR_CASE(DESTROY_AH); EFA_CMD_STR_CASE(REG_MR); EFA_CMD_STR_CASE(DEREG_MR); EFA_CMD_STR_CASE(CREATE_CQ); EFA_CMD_STR_CASE(DESTROY_CQ); EFA_CMD_STR_CASE(GET_FEATURE); EFA_CMD_STR_CASE(SET_FEATURE); EFA_CMD_STR_CASE(GET_STATS); EFA_CMD_STR_CASE(ALLOC_PD); EFA_CMD_STR_CASE(DEALLOC_PD); EFA_CMD_STR_CASE(ALLOC_UAR); EFA_CMD_STR_CASE(DEALLOC_UAR); EFA_CMD_STR_CASE(CREATE_EQ); EFA_CMD_STR_CASE(DESTROY_EQ); default: return "unknown command opcode"; } #undef EFA_CMD_STR_CASE } void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low) { *addr_low = lower_32_bits(addr); *addr_high = upper_32_bits(addr); } static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset) { struct efa_com_mmio_read *mmio_read = &edev->mmio_read; struct efa_admin_mmio_req_read_less_resp *read_resp; unsigned long exp_time; u32 mmio_read_reg = 0; u32 err; read_resp = mmio_read->read_resp; spin_lock(&mmio_read->lock); mmio_read->seq_num++; /* trash DMA req_id to identify when hardware is done */ read_resp->req_id = mmio_read->seq_num + 0x9aL; EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset); EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID, mmio_read->seq_num); writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF); exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout); do { if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) break; udelay(1); } while (time_is_after_jiffies(exp_time)); if (read_resp->req_id != mmio_read->seq_num) { ibdev_err_ratelimited( edev->efa_dev, "Reading register timed out. 
expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n", mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off); err = EFA_MMIO_READ_INVALID; goto out; } if (read_resp->reg_off != offset) { ibdev_err_ratelimited( edev->efa_dev, "Reading register failed: wrong offset provided\n"); err = EFA_MMIO_READ_INVALID; goto out; } err = read_resp->reg_val; out: spin_unlock(&mmio_read->lock); return err; } static int efa_com_admin_init_sq(struct efa_com_dev *edev) { struct efa_com_admin_queue *aq = &edev->aq; struct efa_com_admin_sq *sq = &aq->sq; u16 size = aq->depth * sizeof(*sq->entries); u32 aq_caps = 0; u32 addr_high; u32 addr_low; sq->entries = dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL); if (!sq->entries) return -ENOMEM; spin_lock_init(&sq->lock); sq->cc = 0; sq->pc = 0; sq->phase = 1; sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF); addr_high = upper_32_bits(sq->dma_addr); addr_low = lower_32_bits(sq->dma_addr); writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF); writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF); EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth); EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE, sizeof(struct efa_admin_aq_entry)); writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF); return 0; } static int efa_com_admin_init_cq(struct efa_com_dev *edev) { struct efa_com_admin_queue *aq = &edev->aq; struct efa_com_admin_cq *cq = &aq->cq; u16 size = aq->depth * sizeof(*cq->entries); u32 acq_caps = 0; u32 addr_high; u32 addr_low; cq->entries = dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL); if (!cq->entries) return -ENOMEM; spin_lock_init(&cq->lock); cq->cc = 0; cq->phase = 1; addr_high = upper_32_bits(cq->dma_addr); addr_low = lower_32_bits(cq->dma_addr); writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF); writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF); EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth); EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE, sizeof(struct efa_admin_acq_entry)); EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR, aq->msix_vector_idx); writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF); return 0; } static int efa_com_admin_init_aenq(struct efa_com_dev *edev, struct efa_aenq_handlers *aenq_handlers) { struct efa_com_aenq *aenq = &edev->aenq; u32 addr_low, addr_high; u32 aenq_caps = 0; u16 size; if (!aenq_handlers) { ibdev_err(edev->efa_dev, "aenq handlers pointer is NULL\n"); return -EINVAL; } size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries); aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr, GFP_KERNEL); if (!aenq->entries) return -ENOMEM; aenq->aenq_handlers = aenq_handlers; aenq->depth = EFA_ASYNC_QUEUE_DEPTH; aenq->cc = 0; aenq->phase = 1; addr_low = lower_32_bits(aenq->dma_addr); addr_high = upper_32_bits(aenq->dma_addr); writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF); writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF); EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth); EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE, sizeof(struct efa_admin_aenq_entry)); EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR, aenq->msix_vector_idx); writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF); /* * Init cons_db to mark that all entries in the queue * are initially available */ writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF); return 0; } /* ID to be used with efa_com_get_comp_ctx */ static u16 efa_com_alloc_ctx_id(struct 
efa_com_admin_queue *aq) { u16 ctx_id; spin_lock(&aq->comp_ctx_lock); ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next]; aq->comp_ctx_pool_next++; spin_unlock(&aq->comp_ctx_lock); return ctx_id; } static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq, u16 ctx_id) { spin_lock(&aq->comp_ctx_lock); aq->comp_ctx_pool_next--; aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id; spin_unlock(&aq->comp_ctx_lock); } static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq, struct efa_comp_ctx *comp_ctx) { u16 cmd_id = EFA_GET(&comp_ctx->user_cqe->acq_common_descriptor.command, EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID); u16 ctx_id = cmd_id & (aq->depth - 1); ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id); comp_ctx->occupied = 0; efa_com_dealloc_ctx_id(aq, ctx_id); } static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq, u16 cmd_id, bool capture) { u16 ctx_id = cmd_id & (aq->depth - 1); if (aq->comp_ctx[ctx_id].occupied && capture) { ibdev_err_ratelimited( aq->efa_dev, "Completion context for command_id %#x is occupied\n", cmd_id); return NULL; } if (capture) { aq->comp_ctx[ctx_id].occupied = 1; ibdev_dbg(aq->efa_dev, "Take completion ctxt for command_id %#x\n", cmd_id); } return &aq->comp_ctx[ctx_id]; } static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq, struct efa_admin_aq_entry *cmd, size_t cmd_size_in_bytes, struct efa_admin_acq_entry *comp, size_t comp_size_in_bytes) { struct efa_admin_aq_entry *aqe; struct efa_comp_ctx *comp_ctx; u16 queue_size_mask; u16 cmd_id; u16 ctx_id; u16 pi; queue_size_mask = aq->depth - 1; pi = aq->sq.pc & queue_size_mask; ctx_id = efa_com_alloc_ctx_id(aq); /* cmd_id LSBs are the ctx_id and MSBs are entropy bits from pc */ cmd_id = ctx_id & queue_size_mask; cmd_id |= aq->sq.pc & ~queue_size_mask; cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; cmd->aq_common_descriptor.command_id = cmd_id; EFA_SET(&cmd->aq_common_descriptor.flags, EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase); comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true); if (!comp_ctx) { efa_com_dealloc_ctx_id(aq, ctx_id); return ERR_PTR(-EINVAL); } comp_ctx->status = EFA_CMD_SUBMITTED; comp_ctx->comp_size = comp_size_in_bytes; comp_ctx->user_cqe = comp; comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; reinit_completion(&comp_ctx->wait_event); aqe = &aq->sq.entries[pi]; memset(aqe, 0, sizeof(*aqe)); memcpy(aqe, cmd, cmd_size_in_bytes); aq->sq.pc++; atomic64_inc(&aq->stats.submitted_cmd); if ((aq->sq.pc & queue_size_mask) == 0) aq->sq.phase = !aq->sq.phase; /* barrier not needed in case of writel */ writel(aq->sq.pc, aq->sq.db_addr); return comp_ctx; } static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq) { size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool); size_t size = aq->depth * sizeof(struct efa_comp_ctx); struct efa_comp_ctx *comp_ctx; u16 i; aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL); aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL); if (!aq->comp_ctx || !aq->comp_ctx_pool) { devm_kfree(aq->dmadev, aq->comp_ctx_pool); devm_kfree(aq->dmadev, aq->comp_ctx); return -ENOMEM; } for (i = 0; i < aq->depth; i++) { comp_ctx = efa_com_get_comp_ctx(aq, i, false); if (comp_ctx) init_completion(&comp_ctx->wait_event); aq->comp_ctx_pool[i] = i; } spin_lock_init(&aq->comp_ctx_lock); aq->comp_ctx_pool_next = 0; return 0; } static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq, struct efa_admin_aq_entry *cmd, size_t 
cmd_size_in_bytes, struct efa_admin_acq_entry *comp, size_t comp_size_in_bytes) { struct efa_comp_ctx *comp_ctx; spin_lock(&aq->sq.lock); if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) { ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n"); spin_unlock(&aq->sq.lock); return ERR_PTR(-ENODEV); } comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp, comp_size_in_bytes); spin_unlock(&aq->sq.lock); if (IS_ERR(comp_ctx)) clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); return comp_ctx; } static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq, struct efa_admin_acq_entry *cqe) { struct efa_comp_ctx *comp_ctx; u16 cmd_id; cmd_id = EFA_GET(&cqe->acq_common_descriptor.command, EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID); comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false); if (!comp_ctx) { ibdev_err(aq->efa_dev, "comp_ctx is NULL. Changing the admin queue running state\n"); clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); return; } comp_ctx->status = EFA_CMD_COMPLETED; memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size); if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state)) complete(&comp_ctx->wait_event); } static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq) { struct efa_admin_acq_entry *cqe; u16 queue_size_mask; u16 comp_num = 0; u8 phase; u16 ci; queue_size_mask = aq->depth - 1; ci = aq->cq.cc & queue_size_mask; phase = aq->cq.phase; cqe = &aq->cq.entries[ci]; /* Go over all the completions */ while ((READ_ONCE(cqe->acq_common_descriptor.flags) & EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { /* * Do not read the rest of the completion entry before the * phase bit was validated */ dma_rmb(); efa_com_handle_single_admin_completion(aq, cqe); ci++; comp_num++; if (ci == aq->depth) { ci = 0; phase = !phase; } cqe = &aq->cq.entries[ci]; } aq->cq.cc += comp_num; aq->cq.phase = phase; aq->sq.cc += comp_num; atomic64_add(comp_num, &aq->stats.completed_cmd); } static int efa_com_comp_status_to_errno(u8 comp_status) { switch (comp_status) { case EFA_ADMIN_SUCCESS: return 0; case EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE: return -ENOMEM; case EFA_ADMIN_UNSUPPORTED_OPCODE: return -EOPNOTSUPP; case EFA_ADMIN_BAD_OPCODE: case EFA_ADMIN_MALFORMED_REQUEST: case EFA_ADMIN_ILLEGAL_PARAMETER: case EFA_ADMIN_UNKNOWN_ERROR: return -EINVAL; default: return -EINVAL; } } static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_ctx, struct efa_com_admin_queue *aq) { unsigned long timeout; unsigned long flags; int err; timeout = jiffies + usecs_to_jiffies(aq->completion_timeout); while (1) { spin_lock_irqsave(&aq->cq.lock, flags); efa_com_handle_admin_completion(aq); spin_unlock_irqrestore(&aq->cq.lock, flags); if (comp_ctx->status != EFA_CMD_SUBMITTED) break; if (time_is_before_jiffies(timeout)) { ibdev_err_ratelimited( aq->efa_dev, "Wait for completion (polling) timeout\n"); /* EFA didn't have any completion */ atomic64_inc(&aq->stats.no_completion); clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); err = -ETIME; goto out; } msleep(aq->poll_interval); } err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status); out: efa_com_put_comp_ctx(aq, comp_ctx); return err; } static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx, struct efa_com_admin_queue *aq) { unsigned long flags; int err; wait_for_completion_timeout(&comp_ctx->wait_event, usecs_to_jiffies(aq->completion_timeout)); /* * In case the command wasn't completed find out the root cause. 
* There might be 2 kinds of errors * 1) No completion (timeout reached) * 2) There is completion but the device didn't get any msi-x interrupt. */ if (comp_ctx->status == EFA_CMD_SUBMITTED) { spin_lock_irqsave(&aq->cq.lock, flags); efa_com_handle_admin_completion(aq); spin_unlock_irqrestore(&aq->cq.lock, flags); atomic64_inc(&aq->stats.no_completion); if (comp_ctx->status == EFA_CMD_COMPLETED) ibdev_err_ratelimited( aq->efa_dev, "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", efa_com_cmd_str(comp_ctx->cmd_opcode), comp_ctx->cmd_opcode, comp_ctx->status, comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); else ibdev_err_ratelimited( aq->efa_dev, "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", efa_com_cmd_str(comp_ctx->cmd_opcode), comp_ctx->cmd_opcode, comp_ctx->status, comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); err = -ETIME; goto out; } err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status); out: efa_com_put_comp_ctx(aq, comp_ctx); return err; } /* * There are two types to wait for completion. * Polling mode - wait until the completion is available. * Async mode - wait on wait queue until the completion is ready * (or the timeout expired). * It is expected that the IRQ called efa_com_handle_admin_completion * to mark the completions. */ static int efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx, struct efa_com_admin_queue *aq) { if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state)) return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq); return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq); } /** * efa_com_cmd_exec - Execute admin command * @aq: admin queue. * @cmd: the admin command to execute. * @cmd_size: the command size. * @comp: command completion return entry. * @comp_size: command completion size. * Submit an admin command and then wait until the device will return a * completion. * The completion will be copied into comp. * * @return - 0 on success, negative value on failure. */ int efa_com_cmd_exec(struct efa_com_admin_queue *aq, struct efa_admin_aq_entry *cmd, size_t cmd_size, struct efa_admin_acq_entry *comp, size_t comp_size) { struct efa_comp_ctx *comp_ctx; int err; might_sleep(); /* In case of queue FULL */ down(&aq->avail_cmds); ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n", efa_com_cmd_str(cmd->aq_common_descriptor.opcode), cmd->aq_common_descriptor.opcode); comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size); if (IS_ERR(comp_ctx)) { ibdev_err_ratelimited( aq->efa_dev, "Failed to submit command %s (opcode %u) err %ld\n", efa_com_cmd_str(cmd->aq_common_descriptor.opcode), cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx)); up(&aq->avail_cmds); atomic64_inc(&aq->stats.cmd_err); return PTR_ERR(comp_ctx); } err = efa_com_wait_and_process_admin_cq(comp_ctx, aq); if (err) { ibdev_err_ratelimited( aq->efa_dev, "Failed to process command %s (opcode %u) comp_status %d err %d\n", efa_com_cmd_str(cmd->aq_common_descriptor.opcode), cmd->aq_common_descriptor.opcode, comp_ctx->user_cqe->acq_common_descriptor.status, err); atomic64_inc(&aq->stats.cmd_err); } up(&aq->avail_cmds); return err; } /** * efa_com_admin_destroy - Destroy the admin and the async events queues. 
* @edev: EFA communication layer struct */ void efa_com_admin_destroy(struct efa_com_dev *edev) { struct efa_com_admin_queue *aq = &edev->aq; struct efa_com_aenq *aenq = &edev->aenq; struct efa_com_admin_cq *cq = &aq->cq; struct efa_com_admin_sq *sq = &aq->sq; u16 size; clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); devm_kfree(edev->dmadev, aq->comp_ctx_pool); devm_kfree(edev->dmadev, aq->comp_ctx); size = aq->depth * sizeof(*sq->entries); dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr); size = aq->depth * sizeof(*cq->entries); dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr); size = aenq->depth * sizeof(*aenq->entries); dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr); } /** * efa_com_set_admin_polling_mode - Set the admin completion queue polling mode * @edev: EFA communication layer struct * @polling: Enable/Disable polling mode * * Set the admin completion mode. */ void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling) { u32 mask_value = 0; if (polling) EFA_SET(&mask_value, EFA_REGS_INTR_MASK_EN, 1); writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF); if (polling) set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state); else clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state); } static void efa_com_stats_init(struct efa_com_dev *edev) { atomic64_t *s = (atomic64_t *)&edev->aq.stats; int i; for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++) atomic64_set(s, 0); } /** * efa_com_admin_init - Init the admin and the async queues * @edev: EFA communication layer struct * @aenq_handlers: Those handlers to be called upon event. * * Initialize the admin submission and completion queues. * Initialize the asynchronous events notification queues. * * @return - 0 on success, negative value on failure. 
*/ int efa_com_admin_init(struct efa_com_dev *edev, struct efa_aenq_handlers *aenq_handlers) { struct efa_com_admin_queue *aq = &edev->aq; u32 timeout; u32 dev_sts; u32 cap; int err; dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF); if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) { ibdev_err(edev->efa_dev, "Device isn't ready, abort com init %#x\n", dev_sts); return -ENODEV; } aq->depth = EFA_ADMIN_QUEUE_DEPTH; aq->dmadev = edev->dmadev; aq->efa_dev = edev->efa_dev; set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state); sema_init(&aq->avail_cmds, aq->depth); efa_com_stats_init(edev); err = efa_com_init_comp_ctxt(aq); if (err) return err; err = efa_com_admin_init_sq(edev); if (err) goto err_destroy_comp_ctxt; err = efa_com_admin_init_cq(edev); if (err) goto err_destroy_sq; efa_com_set_admin_polling_mode(edev, false); err = efa_com_admin_init_aenq(edev, aenq_handlers); if (err) goto err_destroy_cq; cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF); timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO); if (timeout) /* the resolution of timeout reg is 100ms */ aq->completion_timeout = timeout * 100000; else aq->completion_timeout = ADMIN_CMD_TIMEOUT_US; aq->poll_interval = EFA_POLL_INTERVAL_MS; set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); return 0; err_destroy_cq: dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries), aq->cq.entries, aq->cq.dma_addr); err_destroy_sq: dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries), aq->sq.entries, aq->sq.dma_addr); err_destroy_comp_ctxt: devm_kfree(edev->dmadev, aq->comp_ctx); return err; } /** * efa_com_admin_q_comp_intr_handler - admin queue interrupt handler * @edev: EFA communication layer struct * * This method goes over the admin completion queue and wakes up * all the pending threads that wait on the commands wait event. * * Note: Should be called after MSI-X interrupt. */ void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev) { unsigned long flags; spin_lock_irqsave(&edev->aq.cq.lock, flags); efa_com_handle_admin_completion(&edev->aq); spin_unlock_irqrestore(&edev->aq.cq.lock, flags); } /* * efa_handle_specific_aenq_event: * return the handler that is relevant to the specific event group */ static efa_aenq_handler efa_com_get_specific_aenq_cb(struct efa_com_dev *edev, u16 group) { struct efa_aenq_handlers *aenq_handlers = edev->aenq.aenq_handlers; if (group < EFA_MAX_HANDLERS && aenq_handlers->handlers[group]) return aenq_handlers->handlers[group]; return aenq_handlers->unimplemented_handler; } /** * efa_com_aenq_intr_handler - AENQ interrupt handler * @edev: EFA communication layer struct * @data: Data of interrupt handler. * * Go over the async event notification queue and call the proper aenq handler. 
*/ void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data) { struct efa_admin_aenq_common_desc *aenq_common; struct efa_com_aenq *aenq = &edev->aenq; struct efa_admin_aenq_entry *aenq_e; efa_aenq_handler handler_cb; u32 processed = 0; u8 phase; u32 ci; ci = aenq->cc & (aenq->depth - 1); phase = aenq->phase; aenq_e = &aenq->entries[ci]; /* Get first entry */ aenq_common = &aenq_e->aenq_common_desc; /* Go over all the events */ while ((READ_ONCE(aenq_common->flags) & EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { /* * Do not read the rest of the completion entry before the * phase bit was validated */ dma_rmb(); /* Handle specific event*/ handler_cb = efa_com_get_specific_aenq_cb(edev, aenq_common->group); handler_cb(data, aenq_e); /* call the actual event handler*/ /* Get next event entry */ ci++; processed++; if (ci == aenq->depth) { ci = 0; phase = !phase; } aenq_e = &aenq->entries[ci]; aenq_common = &aenq_e->aenq_common_desc; } aenq->cc += processed; aenq->phase = phase; /* Don't update aenq doorbell if there weren't any processed events */ if (!processed) return; /* barrier not needed in case of writel */ writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF); } static void efa_com_mmio_reg_read_resp_addr_init(struct efa_com_dev *edev) { struct efa_com_mmio_read *mmio_read = &edev->mmio_read; u32 addr_high; u32 addr_low; /* dma_addr_bits is unknown at this point */ addr_high = (mmio_read->read_resp_dma_addr >> 32) & GENMASK(31, 0); addr_low = mmio_read->read_resp_dma_addr & GENMASK(31, 0); writel(addr_high, edev->reg_bar + EFA_REGS_MMIO_RESP_HI_OFF); writel(addr_low, edev->reg_bar + EFA_REGS_MMIO_RESP_LO_OFF); } int efa_com_mmio_reg_read_init(struct efa_com_dev *edev) { struct efa_com_mmio_read *mmio_read = &edev->mmio_read; spin_lock_init(&mmio_read->lock); mmio_read->read_resp = dma_alloc_coherent(edev->dmadev, sizeof(*mmio_read->read_resp), &mmio_read->read_resp_dma_addr, GFP_KERNEL); if (!mmio_read->read_resp) return -ENOMEM; efa_com_mmio_reg_read_resp_addr_init(edev); mmio_read->read_resp->req_id = 0; mmio_read->seq_num = 0; mmio_read->mmio_read_timeout = EFA_REG_READ_TIMEOUT_US; return 0; } void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev) { struct efa_com_mmio_read *mmio_read = &edev->mmio_read; dma_free_coherent(edev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp, mmio_read->read_resp_dma_addr); } int efa_com_validate_version(struct efa_com_dev *edev) { u32 min_ctrl_ver = 0; u32 ctrl_ver_masked; u32 min_ver = 0; u32 ctrl_ver; u32 ver; /* * Make sure the EFA version and the controller version are at least * as the driver expects */ ver = efa_com_reg_read32(edev, EFA_REGS_VERSION_OFF); ctrl_ver = efa_com_reg_read32(edev, EFA_REGS_CONTROLLER_VERSION_OFF); ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n", EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION), EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION)); EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION, EFA_ADMIN_API_VERSION_MAJOR); EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION, EFA_ADMIN_API_VERSION_MINOR); if (ver < min_ver) { ibdev_err(edev->efa_dev, "EFA version is lower than the minimal version the driver supports\n"); return -EOPNOTSUPP; } ibdev_dbg( edev->efa_dev, "efa controller version: %d.%d.%d implementation version %d\n", EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION), EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION), EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION), EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID)); 
ctrl_ver_masked = EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) | EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) | EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION); EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION, EFA_CTRL_MAJOR); EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION, EFA_CTRL_MINOR); EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION, EFA_CTRL_SUB_MINOR); /* Validate the ctrl version without the implementation ID */ if (ctrl_ver_masked < min_ctrl_ver) { ibdev_err(edev->efa_dev, "EFA ctrl version is lower than the minimal ctrl version the driver supports\n"); return -EOPNOTSUPP; } return 0; } /** * efa_com_get_dma_width - Retrieve physical dma address width the device * supports. * @edev: EFA communication layer struct * * Retrieve the maximum physical address bits the device can handle. * * @return: > 0 on Success and negative value otherwise. */ int efa_com_get_dma_width(struct efa_com_dev *edev) { u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF); int width; width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH); ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width); if (width < 32 || width > 64) { ibdev_err(edev->efa_dev, "DMA width illegal value: %d\n", width); return -EINVAL; } edev->dma_addr_bits = width; return width; } static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on) { u32 val, i; for (i = 0; i < timeout; i++) { val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF); if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on) return 0; ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val); msleep(EFA_POLL_INTERVAL_MS); } return -ETIME; } /** * efa_com_dev_reset - Perform device FLR to the device. * @edev: EFA communication layer struct * @reset_reason: Specify what is the trigger for the reset in case of an error. * * @return - 0 on success, negative value on failure. 
*/ int efa_com_dev_reset(struct efa_com_dev *edev, enum efa_regs_reset_reason_types reset_reason) { u32 stat, timeout, cap; u32 reset_val = 0; int err; stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF); cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF); if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) { ibdev_err(edev->efa_dev, "Device isn't ready, can't reset device\n"); return -EINVAL; } timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT); if (!timeout) { ibdev_err(edev->efa_dev, "Invalid timeout value\n"); return -EINVAL; } /* start reset */ EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1); EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason); writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF); /* reset clears the mmio readless address, restore it */ efa_com_mmio_reg_read_resp_addr_init(edev); err = wait_for_reset_state(edev, timeout, 1); if (err) { ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n"); return err; } /* reset done */ writel(0, edev->reg_bar + EFA_REGS_DEV_CTL_OFF); err = wait_for_reset_state(edev, timeout, 0); if (err) { ibdev_err(edev->efa_dev, "Reset indication didn't turn off\n"); return err; } timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO); if (timeout) /* the resolution of timeout reg is 100ms */ edev->aq.completion_timeout = timeout * 100000; else edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US; return 0; } static int efa_com_create_eq(struct efa_com_dev *edev, struct efa_com_create_eq_params *params, struct efa_com_create_eq_result *result) { struct efa_com_admin_queue *aq = &edev->aq; struct efa_admin_create_eq_resp resp = {}; struct efa_admin_create_eq_cmd cmd = {}; int err; cmd.aq_common_descriptor.opcode = EFA_ADMIN_CREATE_EQ; EFA_SET(&cmd.caps, EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS, params->entry_size_in_bytes / 4); cmd.depth = params->depth; cmd.event_bitmask = params->event_bitmask; cmd.msix_vec = params->msix_vec; efa_com_set_dma_addr(params->dma_addr, &cmd.ba.mem_addr_high, &cmd.ba.mem_addr_low); err = efa_com_cmd_exec(aq, (struct efa_admin_aq_entry *)&cmd, sizeof(cmd), (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { ibdev_err_ratelimited(edev->efa_dev, "Failed to create eq[%d]\n", err); return err; } result->eqn = resp.eqn; return 0; } static void efa_com_destroy_eq(struct efa_com_dev *edev, struct efa_com_destroy_eq_params *params) { struct efa_com_admin_queue *aq = &edev->aq; struct efa_admin_destroy_eq_resp resp = {}; struct efa_admin_destroy_eq_cmd cmd = {}; int err; cmd.aq_common_descriptor.opcode = EFA_ADMIN_DESTROY_EQ; cmd.eqn = params->eqn; err = efa_com_cmd_exec(aq, (struct efa_admin_aq_entry *)&cmd, sizeof(cmd), (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) ibdev_err_ratelimited(edev->efa_dev, "Failed to destroy EQ-%u [%d]\n", cmd.eqn, err); } static void efa_com_arm_eq(struct efa_com_dev *edev, struct efa_com_eq *eeq) { u32 val = 0; EFA_SET(&val, EFA_REGS_EQ_DB_EQN, eeq->eqn); EFA_SET(&val, EFA_REGS_EQ_DB_ARM, 1); writel(val, edev->reg_bar + EFA_REGS_EQ_DB_OFF); } void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev, struct efa_com_eq *eeq) { struct efa_admin_eqe *eqe; u32 processed = 0; u8 phase; u32 ci; ci = eeq->cc & (eeq->depth - 1); phase = eeq->phase; eqe = &eeq->eqes[ci]; /* Go over all the events */ while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) { /* * Do not read the rest of the completion entry before the * phase bit was validated */ dma_rmb(); eeq->cb(eeq, eqe); /* Get next event entry */ ci++; processed++; if (ci == eeq->depth) { ci = 
0; phase = !phase; } eqe = &eeq->eqes[ci]; } eeq->cc += processed; eeq->phase = phase; efa_com_arm_eq(eeq->edev, eeq); } void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq) { struct efa_com_destroy_eq_params params = { .eqn = eeq->eqn, }; efa_com_destroy_eq(edev, &params); dma_free_coherent(edev->dmadev, eeq->depth * sizeof(*eeq->eqes), eeq->eqes, eeq->dma_addr); } int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq, efa_eqe_handler cb, u16 depth, u8 msix_vec) { struct efa_com_create_eq_params params = {}; struct efa_com_create_eq_result result = {}; int err; params.depth = depth; params.entry_size_in_bytes = sizeof(*eeq->eqes); EFA_SET(&params.event_bitmask, EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS, 1); params.msix_vec = msix_vec; eeq->eqes = dma_alloc_coherent(edev->dmadev, params.depth * sizeof(*eeq->eqes), &params.dma_addr, GFP_KERNEL); if (!eeq->eqes) return -ENOMEM; err = efa_com_create_eq(edev, &params, &result); if (err) goto err_free_coherent; eeq->eqn = result.eqn; eeq->edev = edev; eeq->dma_addr = params.dma_addr; eeq->phase = 1; eeq->depth = params.depth; eeq->cb = cb; efa_com_arm_eq(edev, eeq); return 0; err_free_coherent: dma_free_coherent(edev->dmadev, params.depth * sizeof(*eeq->eqes), eeq->eqes, params.dma_addr); return err; }
linux-master
drivers/infiniband/hw/efa/efa_com.c
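
A hedged user-space model of the phase-bit event-queue consumption used by efa_com_eq_comp_intr_handler() in the file above: an EQE is treated as new only while its phase bit matches the consumer's expected phase, and the expected phase flips each time the consumer wraps the ring. All demo_* names below are invented for illustration and do not exist in the EFA driver; the real handler also issues dma_rmb() after the phase check, which this single-threaded model does not need.

/* Stand-alone sketch of the EQ phase-bit protocol; demo_* names are
 * illustrative only and are not part of the driver.
 */
#include <stdio.h>

#define DEMO_DEPTH	4	/* power of two, like the real EQ depth */
#define DEMO_PHASE_MASK	0x1	/* stand-in for EFA_ADMIN_EQE_PHASE_MASK */

struct demo_eqe {
	unsigned int common;	/* bit 0: phase, upper bits: payload */
};

struct demo_eq {
	struct demo_eqe eqes[DEMO_DEPTH];
	unsigned int cc;	/* consumer counter, only ever grows */
	unsigned char phase;	/* phase expected on the current pass */
};

/* Producer: the phase encoded in bit 0 starts at 1 and flips on every
 * full pass over the ring, so stale (zeroed) entries never match.
 */
static void demo_produce(struct demo_eq *eq, unsigned int pc,
			 unsigned int payload)
{
	unsigned char prod_phase = ((pc / DEMO_DEPTH) & 1) ? 0 : 1;

	eq->eqes[pc % DEMO_DEPTH].common = (payload << 1) | prod_phase;
}

/* Consumer: same loop shape as efa_com_eq_comp_intr_handler() - consume
 * while the phase matches, flip the expected phase on wrap, bump cc once.
 */
static void demo_consume(struct demo_eq *eq)
{
	unsigned int ci = eq->cc & (DEMO_DEPTH - 1);
	unsigned char phase = eq->phase;
	unsigned int processed = 0;

	while ((eq->eqes[ci].common & DEMO_PHASE_MASK) == phase) {
		printf("event payload %u\n", eq->eqes[ci].common >> 1);
		ci++;
		processed++;
		if (ci == DEMO_DEPTH) {
			ci = 0;
			phase = !phase;
		}
	}
	eq->cc += processed;
	eq->phase = phase;
}

int main(void)
{
	struct demo_eq eq = { .phase = 1 };	/* matches eeq->phase = 1 at init */

	demo_produce(&eq, 0, 100);
	demo_produce(&eq, 1, 101);
	demo_produce(&eq, 2, 102);
	demo_consume(&eq);		/* prints 100, 101, 102 */

	demo_produce(&eq, 3, 103);	/* fills the last slot ... */
	demo_produce(&eq, 4, 104);	/* ... and wraps with phase 0 */
	demo_consume(&eq);		/* prints 103, 104 across the wrap */
	return 0;
}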
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved. */ #include <linux/dma-buf.h> #include <linux/dma-resv.h> #include <linux/vmalloc.h> #include <linux/log2.h> #include <rdma/ib_addr.h> #include <rdma/ib_umem.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_verbs.h> #include <rdma/uverbs_ioctl.h> #include "efa.h" #include "efa_io_defs.h" enum { EFA_MMAP_DMA_PAGE = 0, EFA_MMAP_IO_WC, EFA_MMAP_IO_NC, }; #define EFA_AENQ_ENABLED_GROUPS \ (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \ BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE)) struct efa_user_mmap_entry { struct rdma_user_mmap_entry rdma_entry; u64 address; u8 mmap_flag; }; #define EFA_DEFINE_DEVICE_STATS(op) \ op(EFA_SUBMITTED_CMDS, "submitted_cmds") \ op(EFA_COMPLETED_CMDS, "completed_cmds") \ op(EFA_CMDS_ERR, "cmds_err") \ op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \ op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \ op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \ op(EFA_CREATE_QP_ERR, "create_qp_err") \ op(EFA_CREATE_CQ_ERR, "create_cq_err") \ op(EFA_REG_MR_ERR, "reg_mr_err") \ op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \ op(EFA_CREATE_AH_ERR, "create_ah_err") \ op(EFA_MMAP_ERR, "mmap_err") #define EFA_DEFINE_PORT_STATS(op) \ op(EFA_TX_BYTES, "tx_bytes") \ op(EFA_TX_PKTS, "tx_pkts") \ op(EFA_RX_BYTES, "rx_bytes") \ op(EFA_RX_PKTS, "rx_pkts") \ op(EFA_RX_DROPS, "rx_drops") \ op(EFA_SEND_BYTES, "send_bytes") \ op(EFA_SEND_WRS, "send_wrs") \ op(EFA_RECV_BYTES, "recv_bytes") \ op(EFA_RECV_WRS, "recv_wrs") \ op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \ op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \ op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \ op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \ op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \ op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \ op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \ op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \ #define EFA_STATS_ENUM(ename, name) ename, #define EFA_STATS_STR(ename, nam) \ [ename].name = nam, enum efa_hw_device_stats { EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM) }; static const struct rdma_stat_desc efa_device_stats_descs[] = { EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR) }; enum efa_hw_port_stats { EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM) }; static const struct rdma_stat_desc efa_port_stats_descs[] = { EFA_DEFINE_PORT_STATS(EFA_STATS_STR) }; #define EFA_CHUNK_PAYLOAD_SHIFT 12 #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT) #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8 #define EFA_CHUNK_SHIFT 12 #define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT) #define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info) #define EFA_PTRS_PER_CHUNK \ ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE) #define EFA_CHUNK_USED_SIZE \ ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE) struct pbl_chunk { dma_addr_t dma_addr; u64 *buf; u32 length; }; struct pbl_chunk_list { struct pbl_chunk *chunks; unsigned int size; }; struct pbl_context { union { struct { dma_addr_t dma_addr; } continuous; struct { u32 pbl_buf_size_in_pages; struct scatterlist *sgl; int sg_dma_cnt; struct pbl_chunk_list chunk_list; } indirect; } phys; u64 *pbl_buf; u32 pbl_buf_size_in_bytes; u8 physically_continuous; }; static inline struct efa_dev *to_edev(struct ib_device *ibdev) { return container_of(ibdev, struct efa_dev, ibdev); } static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext) { return container_of(ibucontext, struct efa_ucontext, 
ibucontext); } static inline struct efa_pd *to_epd(struct ib_pd *ibpd) { return container_of(ibpd, struct efa_pd, ibpd); } static inline struct efa_mr *to_emr(struct ib_mr *ibmr) { return container_of(ibmr, struct efa_mr, ibmr); } static inline struct efa_qp *to_eqp(struct ib_qp *ibqp) { return container_of(ibqp, struct efa_qp, ibqp); } static inline struct efa_cq *to_ecq(struct ib_cq *ibcq) { return container_of(ibcq, struct efa_cq, ibcq); } static inline struct efa_ah *to_eah(struct ib_ah *ibah) { return container_of(ibah, struct efa_ah, ibah); } static inline struct efa_user_mmap_entry * to_emmap(struct rdma_user_mmap_entry *rdma_entry) { return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry); } #define EFA_DEV_CAP(dev, cap) \ ((dev)->dev_attr.device_caps & \ EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK) #define is_reserved_cleared(reserved) \ !memchr_inv(reserved, 0, sizeof(reserved)) static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir) { void *addr; addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); if (!addr) return NULL; *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir); if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) { ibdev_err(&dev->ibdev, "Failed to map DMA address\n"); free_pages_exact(addr, size); return NULL; } return addr; } static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir) { dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir); free_pages_exact(cpu_addr, size); } int efa_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *udata) { struct efa_com_get_device_attr_result *dev_attr; struct efa_ibv_ex_query_device_resp resp = {}; struct efa_dev *dev = to_edev(ibdev); int err; if (udata && udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) { ibdev_dbg(ibdev, "Incompatible ABI params, udata not cleared\n"); return -EINVAL; } dev_attr = &dev->dev_attr; memset(props, 0, sizeof(*props)); props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE; props->page_size_cap = dev_attr->page_size_cap; props->vendor_id = dev->pdev->vendor; props->vendor_part_id = dev->pdev->device; props->hw_ver = dev->pdev->subsystem_device; props->max_qp = dev_attr->max_qp; props->max_cq = dev_attr->max_cq; props->max_pd = dev_attr->max_pd; props->max_mr = dev_attr->max_mr; props->max_ah = dev_attr->max_ah; props->max_cqe = dev_attr->max_cq_depth; props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth, dev_attr->max_rq_depth); props->max_send_sge = dev_attr->max_sq_sge; props->max_recv_sge = dev_attr->max_rq_sge; props->max_sge_rd = dev_attr->max_wr_rdma_sge; props->max_pkeys = 1; if (udata && udata->outlen) { resp.max_sq_sge = dev_attr->max_sq_sge; resp.max_rq_sge = dev_attr->max_rq_sge; resp.max_sq_wr = dev_attr->max_sq_depth; resp.max_rq_wr = dev_attr->max_rq_depth; resp.max_rdma_size = dev_attr->max_rdma_size; resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID; if (EFA_DEV_CAP(dev, RDMA_READ)) resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ; if (EFA_DEV_CAP(dev, RNR_RETRY)) resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY; if (EFA_DEV_CAP(dev, DATA_POLLING_128)) resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128; if (EFA_DEV_CAP(dev, RDMA_WRITE)) resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE; if (dev->neqs) resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS; err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { 
ibdev_dbg(ibdev, "Failed to copy udata for query_device\n"); return err; } } return 0; } int efa_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { struct efa_dev *dev = to_edev(ibdev); props->lmc = 1; props->state = IB_PORT_ACTIVE; props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; props->gid_tbl_len = 1; props->pkey_tbl_len = 1; props->active_speed = IB_SPEED_EDR; props->active_width = IB_WIDTH_4X; props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu); props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu); props->max_msg_sz = dev->dev_attr.mtu; props->max_vl_num = 1; return 0; } int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct efa_dev *dev = to_edev(ibqp->device); struct efa_com_query_qp_params params = {}; struct efa_com_query_qp_result result; struct efa_qp *qp = to_eqp(ibqp); int err; #define EFA_QUERY_QP_SUPP_MASK \ (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \ IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY) if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) { ibdev_dbg(&dev->ibdev, "Unsupported qp_attr_mask[%#x] supported[%#x]\n", qp_attr_mask, EFA_QUERY_QP_SUPP_MASK); return -EOPNOTSUPP; } memset(qp_attr, 0, sizeof(*qp_attr)); memset(qp_init_attr, 0, sizeof(*qp_init_attr)); params.qp_handle = qp->qp_handle; err = efa_com_query_qp(&dev->edev, &params, &result); if (err) return err; qp_attr->qp_state = result.qp_state; qp_attr->qkey = result.qkey; qp_attr->sq_psn = result.sq_psn; qp_attr->sq_draining = result.sq_draining; qp_attr->port_num = 1; qp_attr->rnr_retry = result.rnr_retry; qp_attr->cap.max_send_wr = qp->max_send_wr; qp_attr->cap.max_recv_wr = qp->max_recv_wr; qp_attr->cap.max_send_sge = qp->max_send_sge; qp_attr->cap.max_recv_sge = qp->max_recv_sge; qp_attr->cap.max_inline_data = qp->max_inline_data; qp_init_attr->qp_type = ibqp->qp_type; qp_init_attr->recv_cq = ibqp->recv_cq; qp_init_attr->send_cq = ibqp->send_cq; qp_init_attr->qp_context = ibqp->qp_context; qp_init_attr->cap = qp_attr->cap; return 0; } int efa_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid) { struct efa_dev *dev = to_edev(ibdev); memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr)); return 0; } int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) { if (index > 0) return -EINVAL; *pkey = 0xffff; return 0; } static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn) { struct efa_com_dealloc_pd_params params = { .pdn = pdn, }; return efa_com_dealloc_pd(&dev->edev, &params); } int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibpd->device); struct efa_ibv_alloc_pd_resp resp = {}; struct efa_com_alloc_pd_result result; struct efa_pd *pd = to_epd(ibpd); int err; if (udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params, udata not cleared\n"); err = -EINVAL; goto err_out; } err = efa_com_alloc_pd(&dev->edev, &result); if (err) goto err_out; pd->pdn = result.pdn; resp.pdn = result.pdn; if (udata->outlen) { err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { ibdev_dbg(&dev->ibdev, "Failed to copy udata for alloc_pd\n"); goto err_dealloc_pd; } } ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn); return 0; err_dealloc_pd: efa_pd_dealloc(dev, result.pdn); err_out: atomic64_inc(&dev->stats.alloc_pd_err); return err; } int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct efa_dev *dev = 
to_edev(ibpd->device); struct efa_pd *pd = to_epd(ibpd); ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn); efa_pd_dealloc(dev, pd->pdn); return 0; } static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle) { struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle }; return efa_com_destroy_qp(&dev->edev, &params); } static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp) { rdma_user_mmap_entry_remove(qp->rq_mmap_entry); rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry); rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry); rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry); } int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibqp->pd->device); struct efa_qp *qp = to_eqp(ibqp); int err; ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num); err = efa_destroy_qp_handle(dev, qp->qp_handle); if (err) return err; efa_qp_user_mmap_entries_remove(qp); if (qp->rq_cpu_addr) { ibdev_dbg(&dev->ibdev, "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n", qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr); efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr, qp->rq_size, DMA_TO_DEVICE); } return 0; } static struct rdma_user_mmap_entry* efa_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, size_t length, u8 mmap_flag, u64 *offset) { struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL); int err; if (!entry) return NULL; entry->address = address; entry->mmap_flag = mmap_flag; err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry, length); if (err) { kfree(entry); return NULL; } *offset = rdma_user_mmap_get_offset(&entry->rdma_entry); return &entry->rdma_entry; } static int qp_mmap_entries_setup(struct efa_qp *qp, struct efa_dev *dev, struct efa_ucontext *ucontext, struct efa_com_create_qp_params *params, struct efa_ibv_create_qp_resp *resp) { size_t length; u64 address; address = dev->db_bar_addr + resp->sq_db_offset; qp->sq_db_mmap_entry = efa_user_mmap_entry_insert(&ucontext->ibucontext, address, PAGE_SIZE, EFA_MMAP_IO_NC, &resp->sq_db_mmap_key); if (!qp->sq_db_mmap_entry) return -ENOMEM; resp->sq_db_offset &= ~PAGE_MASK; address = dev->mem_bar_addr + resp->llq_desc_offset; length = PAGE_ALIGN(params->sq_ring_size_in_bytes + (resp->llq_desc_offset & ~PAGE_MASK)); qp->llq_desc_mmap_entry = efa_user_mmap_entry_insert(&ucontext->ibucontext, address, length, EFA_MMAP_IO_WC, &resp->llq_desc_mmap_key); if (!qp->llq_desc_mmap_entry) goto err_remove_mmap; resp->llq_desc_offset &= ~PAGE_MASK; if (qp->rq_size) { address = dev->db_bar_addr + resp->rq_db_offset; qp->rq_db_mmap_entry = efa_user_mmap_entry_insert(&ucontext->ibucontext, address, PAGE_SIZE, EFA_MMAP_IO_NC, &resp->rq_db_mmap_key); if (!qp->rq_db_mmap_entry) goto err_remove_mmap; resp->rq_db_offset &= ~PAGE_MASK; address = virt_to_phys(qp->rq_cpu_addr); qp->rq_mmap_entry = efa_user_mmap_entry_insert(&ucontext->ibucontext, address, qp->rq_size, EFA_MMAP_DMA_PAGE, &resp->rq_mmap_key); if (!qp->rq_mmap_entry) goto err_remove_mmap; resp->rq_mmap_size = qp->rq_size; } return 0; err_remove_mmap: efa_qp_user_mmap_entries_remove(qp); return -ENOMEM; } static int efa_qp_validate_cap(struct efa_dev *dev, struct ib_qp_init_attr *init_attr) { if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) { ibdev_dbg(&dev->ibdev, "qp: requested send wr[%u] exceeds the max[%u]\n", init_attr->cap.max_send_wr, dev->dev_attr.max_sq_depth); return -EINVAL; } if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) { ibdev_dbg(&dev->ibdev, "qp: 
requested receive wr[%u] exceeds the max[%u]\n", init_attr->cap.max_recv_wr, dev->dev_attr.max_rq_depth); return -EINVAL; } if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) { ibdev_dbg(&dev->ibdev, "qp: requested sge send[%u] exceeds the max[%u]\n", init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge); return -EINVAL; } if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) { ibdev_dbg(&dev->ibdev, "qp: requested sge recv[%u] exceeds the max[%u]\n", init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge); return -EINVAL; } if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) { ibdev_dbg(&dev->ibdev, "qp: requested inline data[%u] exceeds the max[%u]\n", init_attr->cap.max_inline_data, dev->dev_attr.inline_buf_size); return -EINVAL; } return 0; } static int efa_qp_validate_attr(struct efa_dev *dev, struct ib_qp_init_attr *init_attr) { if (init_attr->qp_type != IB_QPT_DRIVER && init_attr->qp_type != IB_QPT_UD) { ibdev_dbg(&dev->ibdev, "Unsupported qp type %d\n", init_attr->qp_type); return -EOPNOTSUPP; } if (init_attr->srq) { ibdev_dbg(&dev->ibdev, "SRQ is not supported\n"); return -EOPNOTSUPP; } if (init_attr->create_flags) { ibdev_dbg(&dev->ibdev, "Unsupported create flags\n"); return -EOPNOTSUPP; } return 0; } int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct efa_com_create_qp_params create_qp_params = {}; struct efa_com_create_qp_result create_qp_resp; struct efa_dev *dev = to_edev(ibqp->device); struct efa_ibv_create_qp_resp resp = {}; struct efa_ibv_create_qp cmd = {}; struct efa_qp *qp = to_eqp(ibqp); struct efa_ucontext *ucontext; int err; ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext, ibucontext); err = efa_qp_validate_cap(dev, init_attr); if (err) goto err_out; err = efa_qp_validate_attr(dev, init_attr); if (err) goto err_out; if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params, no input udata\n"); err = -EINVAL; goto err_out; } if (udata->inlen > sizeof(cmd) && !ib_is_udata_cleared(udata, sizeof(cmd), udata->inlen - sizeof(cmd))) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; goto err_out; } err = ib_copy_from_udata(&cmd, udata, min(sizeof(cmd), udata->inlen)); if (err) { ibdev_dbg(&dev->ibdev, "Cannot copy udata for create_qp\n"); goto err_out; } if (cmd.comp_mask) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; goto err_out; } create_qp_params.uarn = ucontext->uarn; create_qp_params.pd = to_epd(ibqp->pd)->pdn; if (init_attr->qp_type == IB_QPT_UD) { create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD; } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) { create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD; } else { ibdev_dbg(&dev->ibdev, "Unsupported qp type %d driver qp type %d\n", init_attr->qp_type, cmd.driver_qp_type); err = -EOPNOTSUPP; goto err_out; } ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n", init_attr->qp_type, cmd.driver_qp_type); create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx; create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx; create_qp_params.sq_depth = init_attr->cap.max_send_wr; create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size; create_qp_params.rq_depth = init_attr->cap.max_recv_wr; create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size; qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes); if (qp->rq_size) { qp->rq_cpu_addr = 
efa_zalloc_mapped(dev, &qp->rq_dma_addr, qp->rq_size, DMA_TO_DEVICE); if (!qp->rq_cpu_addr) { err = -ENOMEM; goto err_out; } ibdev_dbg(&dev->ibdev, "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n", qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr); create_qp_params.rq_base_addr = qp->rq_dma_addr; } err = efa_com_create_qp(&dev->edev, &create_qp_params, &create_qp_resp); if (err) goto err_free_mapped; resp.sq_db_offset = create_qp_resp.sq_db_offset; resp.rq_db_offset = create_qp_resp.rq_db_offset; resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset; resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx; resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx; err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params, &resp); if (err) goto err_destroy_qp; qp->qp_handle = create_qp_resp.qp_handle; qp->ibqp.qp_num = create_qp_resp.qp_num; qp->max_send_wr = init_attr->cap.max_send_wr; qp->max_recv_wr = init_attr->cap.max_recv_wr; qp->max_send_sge = init_attr->cap.max_send_sge; qp->max_recv_sge = init_attr->cap.max_recv_sge; qp->max_inline_data = init_attr->cap.max_inline_data; if (udata->outlen) { err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { ibdev_dbg(&dev->ibdev, "Failed to copy udata for qp[%u]\n", create_qp_resp.qp_num); goto err_remove_mmap_entries; } } ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num); return 0; err_remove_mmap_entries: efa_qp_user_mmap_entries_remove(qp); err_destroy_qp: efa_destroy_qp_handle(dev, create_qp_resp.qp_handle); err_free_mapped: if (qp->rq_size) efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr, qp->rq_size, DMA_TO_DEVICE); err_out: atomic64_inc(&dev->stats.create_qp_err); return err; } static const struct { int valid; enum ib_qp_attr_mask req_param; enum ib_qp_attr_mask opt_param; } srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .req_param = IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY, }, }, [IB_QPS_INIT] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .opt_param = IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY, }, [IB_QPS_RTR] = { .valid = 1, .opt_param = IB_QP_PKEY_INDEX | IB_QP_QKEY, }, }, [IB_QPS_RTR] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .req_param = IB_QP_SQ_PSN, .opt_param = IB_QP_CUR_STATE | IB_QP_QKEY | IB_QP_RNR_RETRY, } }, [IB_QPS_RTS] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .opt_param = IB_QP_CUR_STATE | IB_QP_QKEY, }, [IB_QPS_SQD] = { .valid = 1, .opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY, }, }, [IB_QPS_SQD] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .opt_param = IB_QP_CUR_STATE | IB_QP_QKEY, }, [IB_QPS_SQD] = { .valid = 1, .opt_param = IB_QP_PKEY_INDEX | IB_QP_QKEY, } }, [IB_QPS_SQE] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .opt_param = IB_QP_CUR_STATE | IB_QP_QKEY, } }, [IB_QPS_ERR] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, } }; static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, enum ib_qp_attr_mask mask) { enum ib_qp_attr_mask req_param, opt_param; if (mask & IB_QP_CUR_STATE && cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) return false; if (!srd_qp_state_table[cur_state][next_state].valid) return 
false; req_param = srd_qp_state_table[cur_state][next_state].req_param; opt_param = srd_qp_state_table[cur_state][next_state].opt_param; if ((mask & req_param) != req_param) return false; if (mask & ~(req_param | opt_param | IB_QP_STATE)) return false; return true; } static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) { int err; #define EFA_MODIFY_QP_SUPP_MASK \ (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \ IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \ IB_QP_RNR_RETRY) if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) { ibdev_dbg(&dev->ibdev, "Unsupported qp_attr_mask[%#x] supported[%#x]\n", qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK); return -EOPNOTSUPP; } if (qp->ibqp.qp_type == IB_QPT_DRIVER) err = !efa_modify_srd_qp_is_ok(cur_state, new_state, qp_attr_mask); else err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD, qp_attr_mask); if (err) { ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n"); return -EINVAL; } if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) { ibdev_dbg(&dev->ibdev, "Can't change port num\n"); return -EOPNOTSUPP; } if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) { ibdev_dbg(&dev->ibdev, "Can't change pkey index\n"); return -EOPNOTSUPP; } return 0; } int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibqp->device); struct efa_com_modify_qp_params params = {}; struct efa_qp *qp = to_eqp(ibqp); enum ib_qp_state cur_state; enum ib_qp_state new_state; int err; if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS) return -EOPNOTSUPP; if (udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params, udata not cleared\n"); return -EINVAL; } cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state : qp->state; new_state = qp_attr_mask & IB_QP_STATE ? 
qp_attr->qp_state : cur_state; err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state, new_state); if (err) return err; params.qp_handle = qp->qp_handle; if (qp_attr_mask & IB_QP_STATE) { EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE, 1); EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1); params.cur_qp_state = cur_state; params.qp_state = new_state; } if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1); params.sq_drained_async_notify = qp_attr->en_sqd_async_notify; } if (qp_attr_mask & IB_QP_QKEY) { EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1); params.qkey = qp_attr->qkey; } if (qp_attr_mask & IB_QP_SQ_PSN) { EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1); params.sq_psn = qp_attr->sq_psn; } if (qp_attr_mask & IB_QP_RNR_RETRY) { EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY, 1); params.rnr_retry = qp_attr->rnr_retry; } err = efa_com_modify_qp(&dev->edev, &params); if (err) return err; qp->state = new_state; return 0; } static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx) { struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx }; return efa_com_destroy_cq(&dev->edev, &params); } static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq) { rdma_user_mmap_entry_remove(cq->db_mmap_entry); rdma_user_mmap_entry_remove(cq->mmap_entry); } int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibcq->device); struct efa_cq *cq = to_ecq(ibcq); ibdev_dbg(&dev->ibdev, "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n", cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr); efa_destroy_cq_idx(dev, cq->cq_idx); efa_cq_user_mmap_entries_remove(cq); if (cq->eq) { xa_erase(&dev->cqs_xa, cq->cq_idx); synchronize_irq(cq->eq->irq.irqn); } efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE); return 0; } static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec) { return &dev->eqs[vec]; } static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, struct efa_ibv_create_cq_resp *resp, bool db_valid) { resp->q_mmap_size = cq->size; cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext, virt_to_phys(cq->cpu_addr), cq->size, EFA_MMAP_DMA_PAGE, &resp->q_mmap_key); if (!cq->mmap_entry) return -ENOMEM; if (db_valid) { cq->db_mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext, dev->db_bar_addr + resp->db_off, PAGE_SIZE, EFA_MMAP_IO_NC, &resp->db_mmap_key); if (!cq->db_mmap_entry) { rdma_user_mmap_entry_remove(cq->mmap_entry); return -ENOMEM; } resp->db_off &= ~PAGE_MASK; resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF; } return 0; } int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct efa_ucontext *ucontext = rdma_udata_to_drv_context( udata, struct efa_ucontext, ibucontext); struct efa_com_create_cq_params params = {}; struct efa_ibv_create_cq_resp resp = {}; struct efa_com_create_cq_result result; struct ib_device *ibdev = ibcq->device; struct efa_dev *dev = to_edev(ibdev); struct efa_ibv_create_cq cmd = {}; struct efa_cq *cq = to_ecq(ibcq); int entries = attr->cqe; bool set_src_addr; int err; ibdev_dbg(ibdev, "create_cq entries %d\n", entries); if (attr->flags) return -EOPNOTSUPP; if (entries < 1 || entries > dev->dev_attr.max_cq_depth) { ibdev_dbg(ibdev, "cq: requested entries[%u] non-positive or greater than max[%u]\n", entries, 
dev->dev_attr.max_cq_depth); err = -EINVAL; goto err_out; } if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) { ibdev_dbg(ibdev, "Incompatible ABI params, no input udata\n"); err = -EINVAL; goto err_out; } if (udata->inlen > sizeof(cmd) && !ib_is_udata_cleared(udata, sizeof(cmd), udata->inlen - sizeof(cmd))) { ibdev_dbg(ibdev, "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; goto err_out; } err = ib_copy_from_udata(&cmd, udata, min(sizeof(cmd), udata->inlen)); if (err) { ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n"); goto err_out; } if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) { ibdev_dbg(ibdev, "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; goto err_out; } set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID); if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) && (set_src_addr || cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) { ibdev_dbg(ibdev, "Invalid entry size [%u]\n", cmd.cq_entry_size); err = -EINVAL; goto err_out; } if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) { ibdev_dbg(ibdev, "Invalid number of sub cqs[%u] expected[%u]\n", cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq); err = -EINVAL; goto err_out; } cq->ucontext = ucontext; cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs); cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size, DMA_FROM_DEVICE); if (!cq->cpu_addr) { err = -ENOMEM; goto err_out; } params.uarn = cq->ucontext->uarn; params.cq_depth = entries; params.dma_addr = cq->dma_addr; params.entry_size_in_bytes = cmd.cq_entry_size; params.num_sub_cqs = cmd.num_sub_cqs; params.set_src_addr = set_src_addr; if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) { cq->eq = efa_vec2eq(dev, attr->comp_vector); params.eqn = cq->eq->eeq.eqn; params.interrupt_mode_enabled = true; } err = efa_com_create_cq(&dev->edev, &params, &result); if (err) goto err_free_mapped; resp.db_off = result.db_off; resp.cq_idx = result.cq_idx; cq->cq_idx = result.cq_idx; cq->ibcq.cqe = result.actual_depth; WARN_ON_ONCE(entries != result.actual_depth); err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid); if (err) { ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n", cq->cq_idx); goto err_destroy_cq; } if (cq->eq) { err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL)); if (err) { ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n", cq->cq_idx); goto err_remove_mmap; } } if (udata->outlen) { err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { ibdev_dbg(ibdev, "Failed to copy udata for create_cq\n"); goto err_xa_erase; } } ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. 
dma[%pad] virt[0x%p]\n", cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr); return 0; err_xa_erase: if (cq->eq) xa_erase(&dev->cqs_xa, cq->cq_idx); err_remove_mmap: efa_cq_user_mmap_entries_remove(cq); err_destroy_cq: efa_destroy_cq_idx(dev, cq->cq_idx); err_free_mapped: efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE); err_out: atomic64_inc(&dev->stats.create_cq_err); return err; } static int umem_to_page_list(struct efa_dev *dev, struct ib_umem *umem, u64 *page_list, u32 hp_cnt, u8 hp_shift) { u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT); struct ib_block_iter biter; unsigned int hp_idx = 0; ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n", hp_cnt, pages_in_hp); rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift)) page_list[hp_idx++] = rdma_block_iter_dma_address(&biter); return 0; } static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt) { struct scatterlist *sglist; struct page *pg; int i; sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL); if (!sglist) return NULL; sg_init_table(sglist, page_cnt); for (i = 0; i < page_cnt; i++) { pg = vmalloc_to_page(buf); if (!pg) goto err; sg_set_page(&sglist[i], pg, PAGE_SIZE, 0); buf += PAGE_SIZE / sizeof(*buf); } return sglist; err: kfree(sglist); return NULL; } /* * create a chunk list of physical pages dma addresses from the supplied * scatter gather list */ static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl) { struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list; int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages; struct scatterlist *pages_sgl = pbl->phys.indirect.sgl; unsigned int chunk_list_size, chunk_idx, payload_idx; int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt; struct efa_com_ctrl_buff_info *ctrl_buf; u64 *cur_chunk_buf, *prev_chunk_buf; struct ib_block_iter biter; dma_addr_t dma_addr; int i; /* allocate a chunk list that consists of 4KB chunks */ chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK); chunk_list->size = chunk_list_size; chunk_list->chunks = kcalloc(chunk_list_size, sizeof(*chunk_list->chunks), GFP_KERNEL); if (!chunk_list->chunks) return -ENOMEM; ibdev_dbg(&dev->ibdev, "chunk_list_size[%u] - pages[%u]\n", chunk_list_size, page_cnt); /* allocate chunk buffers: */ for (i = 0; i < chunk_list_size; i++) { chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL); if (!chunk_list->chunks[i].buf) goto chunk_list_dealloc; chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE; } chunk_list->chunks[chunk_list_size - 1].length = ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE; /* fill the dma addresses of sg list pages to chunks: */ chunk_idx = 0; payload_idx = 0; cur_chunk_buf = chunk_list->chunks[0].buf; rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt, EFA_CHUNK_PAYLOAD_SIZE) { cur_chunk_buf[payload_idx++] = rdma_block_iter_dma_address(&biter); if (payload_idx == EFA_PTRS_PER_CHUNK) { chunk_idx++; cur_chunk_buf = chunk_list->chunks[chunk_idx].buf; payload_idx = 0; } } /* map chunks to dma and fill chunks next ptrs */ for (i = chunk_list_size - 1; i >= 0; i--) { dma_addr = dma_map_single(&dev->pdev->dev, chunk_list->chunks[i].buf, chunk_list->chunks[i].length, DMA_TO_DEVICE); if (dma_mapping_error(&dev->pdev->dev, dma_addr)) { ibdev_err(&dev->ibdev, "chunk[%u] dma_map_failed\n", i); goto chunk_list_unmap; } chunk_list->chunks[i].dma_addr = dma_addr; ibdev_dbg(&dev->ibdev, "chunk[%u] mapped at [%pad]\n", i, &dma_addr); if (!i) break; prev_chunk_buf = 
chunk_list->chunks[i - 1].buf; ctrl_buf = (struct efa_com_ctrl_buff_info *) &prev_chunk_buf[EFA_PTRS_PER_CHUNK]; ctrl_buf->length = chunk_list->chunks[i].length; efa_com_set_dma_addr(dma_addr, &ctrl_buf->address.mem_addr_high, &ctrl_buf->address.mem_addr_low); } return 0; chunk_list_unmap: for (; i < chunk_list_size; i++) { dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr, chunk_list->chunks[i].length, DMA_TO_DEVICE); } chunk_list_dealloc: for (i = 0; i < chunk_list_size; i++) kfree(chunk_list->chunks[i].buf); kfree(chunk_list->chunks); return -ENOMEM; } static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl) { struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list; int i; for (i = 0; i < chunk_list->size; i++) { dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr, chunk_list->chunks[i].length, DMA_TO_DEVICE); kfree(chunk_list->chunks[i].buf); } kfree(chunk_list->chunks); } /* initialize pbl continuous mode: map pbl buffer to a dma address. */ static int pbl_continuous_initialize(struct efa_dev *dev, struct pbl_context *pbl) { dma_addr_t dma_addr; dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf, pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE); if (dma_mapping_error(&dev->pdev->dev, dma_addr)) { ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n"); return -ENOMEM; } pbl->phys.continuous.dma_addr = dma_addr; ibdev_dbg(&dev->ibdev, "pbl continuous - dma_addr = %pad, size[%u]\n", &dma_addr, pbl->pbl_buf_size_in_bytes); return 0; } /* * initialize pbl indirect mode: * create a chunk list out of the dma addresses of the physical pages of * pbl buffer. */ static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl) { u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE); struct scatterlist *sgl; int sg_dma_cnt, err; BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE); sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages); if (!sgl) return -ENOMEM; sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE); if (!sg_dma_cnt) { err = -EINVAL; goto err_map; } pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages; pbl->phys.indirect.sgl = sgl; pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt; err = pbl_chunk_list_create(dev, pbl); if (err) { ibdev_dbg(&dev->ibdev, "chunk_list creation failed[%d]\n", err); goto err_chunk; } ibdev_dbg(&dev->ibdev, "pbl indirect - size[%u], chunks[%u]\n", pbl->pbl_buf_size_in_bytes, pbl->phys.indirect.chunk_list.size); return 0; err_chunk: dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE); err_map: kfree(sgl); return err; } static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl) { pbl_chunk_list_destroy(dev, pbl); dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl, pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE); kfree(pbl->phys.indirect.sgl); } /* create a page buffer list from a mapped user memory region */ static int pbl_create(struct efa_dev *dev, struct pbl_context *pbl, struct ib_umem *umem, int hp_cnt, u8 hp_shift) { int err; pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE; pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL); if (!pbl->pbl_buf) return -ENOMEM; if (is_vmalloc_addr(pbl->pbl_buf)) { pbl->physically_continuous = 0; err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt, hp_shift); if (err) goto err_free; err = pbl_indirect_initialize(dev, pbl); if (err) goto err_free; } else { pbl->physically_continuous = 1; err = 
umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt, hp_shift); if (err) goto err_free; err = pbl_continuous_initialize(dev, pbl); if (err) goto err_free; } ibdev_dbg(&dev->ibdev, "user_pbl_created: user_pages[%u], continuous[%u]\n", hp_cnt, pbl->physically_continuous); return 0; err_free: kvfree(pbl->pbl_buf); return err; } static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl) { if (pbl->physically_continuous) dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr, pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE); else pbl_indirect_terminate(dev, pbl); kvfree(pbl->pbl_buf); } static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr, struct efa_com_reg_mr_params *params) { int err; params->inline_pbl = 1; err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array, params->page_num, params->page_shift); if (err) return err; ibdev_dbg(&dev->ibdev, "inline_pbl_array - pages[%u]\n", params->page_num); return 0; } static int efa_create_pbl(struct efa_dev *dev, struct pbl_context *pbl, struct efa_mr *mr, struct efa_com_reg_mr_params *params) { int err; err = pbl_create(dev, pbl, mr->umem, params->page_num, params->page_shift); if (err) { ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err); return err; } params->inline_pbl = 0; params->indirect = !pbl->physically_continuous; if (pbl->physically_continuous) { params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes; efa_com_set_dma_addr(pbl->phys.continuous.dma_addr, &params->pbl.pbl.address.mem_addr_high, &params->pbl.pbl.address.mem_addr_low); } else { params->pbl.pbl.length = pbl->phys.indirect.chunk_list.chunks[0].length; efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr, &params->pbl.pbl.address.mem_addr_high, &params->pbl.pbl.address.mem_addr_low); } return 0; } static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibpd->device); int supp_access_flags; struct efa_mr *mr; if (udata && udata->inlen && !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params, udata not cleared\n"); return ERR_PTR(-EINVAL); } supp_access_flags = IB_ACCESS_LOCAL_WRITE | (EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) | (EFA_DEV_CAP(dev, RDMA_WRITE) ? 
IB_ACCESS_REMOTE_WRITE : 0); access_flags &= ~IB_ACCESS_OPTIONAL; if (access_flags & ~supp_access_flags) { ibdev_dbg(&dev->ibdev, "Unsupported access flags[%#x], supported[%#x]\n", access_flags, supp_access_flags); return ERR_PTR(-EOPNOTSUPP); } mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); return mr; } static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start, u64 length, u64 virt_addr, int access_flags) { struct efa_dev *dev = to_edev(ibpd->device); struct efa_com_reg_mr_params params = {}; struct efa_com_reg_mr_result result = {}; struct pbl_context pbl; unsigned int pg_sz; int inline_size; int err; params.pd = to_epd(ibpd)->pdn; params.iova = virt_addr; params.mr_length_in_bytes = length; params.permissions = access_flags; pg_sz = ib_umem_find_best_pgsz(mr->umem, dev->dev_attr.page_size_cap, virt_addr); if (!pg_sz) { ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n", dev->dev_attr.page_size_cap); return -EOPNOTSUPP; } params.page_shift = order_base_2(pg_sz); params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz); ibdev_dbg(&dev->ibdev, "start %#llx length %#llx params.page_shift %u params.page_num %u\n", start, length, params.page_shift, params.page_num); inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array); if (params.page_num <= inline_size) { err = efa_create_inline_pbl(dev, mr, &params); if (err) return err; err = efa_com_register_mr(&dev->edev, &params, &result); if (err) return err; } else { err = efa_create_pbl(dev, &pbl, mr, &params); if (err) return err; err = efa_com_register_mr(&dev->edev, &params, &result); pbl_destroy(dev, &pbl); if (err) return err; } mr->ibmr.lkey = result.l_key; mr->ibmr.rkey = result.r_key; mr->ibmr.length = length; ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey); return 0; } struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length, u64 virt_addr, int fd, int access_flags, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibpd->device); struct ib_umem_dmabuf *umem_dmabuf; struct efa_mr *mr; int err; mr = efa_alloc_mr(ibpd, access_flags, udata); if (IS_ERR(mr)) { err = PTR_ERR(mr); goto err_out; } umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd, access_flags); if (IS_ERR(umem_dmabuf)) { err = PTR_ERR(umem_dmabuf); ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err); goto err_free; } mr->umem = &umem_dmabuf->umem; err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags); if (err) goto err_release; return &mr->ibmr; err_release: ib_umem_release(mr->umem); err_free: kfree(mr); err_out: atomic64_inc(&dev->stats.reg_mr_err); return ERR_PTR(err); } struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibpd->device); struct efa_mr *mr; int err; mr = efa_alloc_mr(ibpd, access_flags, udata); if (IS_ERR(mr)) { err = PTR_ERR(mr); goto err_out; } mr->umem = ib_umem_get(ibpd->device, start, length, access_flags); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); ibdev_dbg(&dev->ibdev, "Failed to pin and map user space memory[%d]\n", err); goto err_free; } err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags); if (err) goto err_release; return &mr->ibmr; err_release: ib_umem_release(mr->umem); err_free: kfree(mr); err_out: atomic64_inc(&dev->stats.reg_mr_err); return ERR_PTR(err); } int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct efa_dev *dev = 
to_edev(ibmr->device); struct efa_com_dereg_mr_params params; struct efa_mr *mr = to_emr(ibmr); int err; ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey); params.l_key = mr->ibmr.lkey; err = efa_com_dereg_mr(&dev->edev, &params); if (err) return err; ib_umem_release(mr->umem); kfree(mr); return 0; } int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; int err; err = ib_query_port(ibdev, port_num, &attr); if (err) { ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err); return err; } immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; return 0; } static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn) { struct efa_com_dealloc_uar_params params = { .uarn = uarn, }; return efa_com_dealloc_uar(&dev->edev, &params); } #define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \ (_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \ NULL : #_attr) static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext, const struct efa_ibv_alloc_ucontext_cmd *cmd) { struct efa_dev *dev = to_edev(ibucontext->device); char *attr_str; if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch, EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str)) goto err; if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth, EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR, attr_str)) goto err; return 0; err: ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n", attr_str); return -EOPNOTSUPP; } int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata) { struct efa_ucontext *ucontext = to_eucontext(ibucontext); struct efa_dev *dev = to_edev(ibucontext->device); struct efa_ibv_alloc_ucontext_resp resp = {}; struct efa_ibv_alloc_ucontext_cmd cmd = {}; struct efa_com_alloc_uar_result result; int err; /* * it's fine if the driver does not know all request fields, * we will ack input fields in our response. 
*/ err = ib_copy_from_udata(&cmd, udata, min(sizeof(cmd), udata->inlen)); if (err) { ibdev_dbg(&dev->ibdev, "Cannot copy udata for alloc_ucontext\n"); goto err_out; } err = efa_user_comp_handshake(ibucontext, &cmd); if (err) goto err_out; err = efa_com_alloc_uar(&dev->edev, &result); if (err) goto err_out; ucontext->uarn = result.uarn; resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE; resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH; resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq; resp.inline_buf_size = dev->dev_attr.inline_buf_size; resp.max_llq_size = dev->dev_attr.max_llq_size; resp.max_tx_batch = dev->dev_attr.max_tx_batch; resp.min_sq_wr = dev->dev_attr.min_sq_depth; err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) goto err_dealloc_uar; return 0; err_dealloc_uar: efa_dealloc_uar(dev, result.uarn); err_out: atomic64_inc(&dev->stats.alloc_ucontext_err); return err; } void efa_dealloc_ucontext(struct ib_ucontext *ibucontext) { struct efa_ucontext *ucontext = to_eucontext(ibucontext); struct efa_dev *dev = to_edev(ibucontext->device); efa_dealloc_uar(dev, ucontext->uarn); } void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry) { struct efa_user_mmap_entry *entry = to_emmap(rdma_entry); kfree(entry); } static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext, struct vm_area_struct *vma) { struct rdma_user_mmap_entry *rdma_entry; struct efa_user_mmap_entry *entry; unsigned long va; int err = 0; u64 pfn; rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma); if (!rdma_entry) { ibdev_dbg(&dev->ibdev, "pgoff[%#lx] does not have valid entry\n", vma->vm_pgoff); atomic64_inc(&dev->stats.mmap_err); return -EINVAL; } entry = to_emmap(rdma_entry); ibdev_dbg(&dev->ibdev, "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n", entry->address, rdma_entry->npages * PAGE_SIZE, entry->mmap_flag); pfn = entry->address >> PAGE_SHIFT; switch (entry->mmap_flag) { case EFA_MMAP_IO_NC: err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, entry->rdma_entry.npages * PAGE_SIZE, pgprot_noncached(vma->vm_page_prot), rdma_entry); break; case EFA_MMAP_IO_WC: err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, entry->rdma_entry.npages * PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot), rdma_entry); break; case EFA_MMAP_DMA_PAGE: for (va = vma->vm_start; va < vma->vm_end; va += PAGE_SIZE, pfn++) { err = vm_insert_page(vma, va, pfn_to_page(pfn)); if (err) break; } break; default: err = -EINVAL; } if (err) { ibdev_dbg( &dev->ibdev, "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n", entry->address, rdma_entry->npages * PAGE_SIZE, entry->mmap_flag, err); atomic64_inc(&dev->stats.mmap_err); } rdma_user_mmap_entry_put(rdma_entry); return err; } int efa_mmap(struct ib_ucontext *ibucontext, struct vm_area_struct *vma) { struct efa_ucontext *ucontext = to_eucontext(ibucontext); struct efa_dev *dev = to_edev(ibucontext->device); size_t length = vma->vm_end - vma->vm_start; ibdev_dbg(&dev->ibdev, "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n", vma->vm_start, vma->vm_end, length, vma->vm_pgoff); return __efa_mmap(dev, ucontext, vma); } static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah) { struct efa_com_destroy_ah_params params = { .ah = ah->ah, .pdn = to_epd(ah->ibah.pd)->pdn, }; return efa_com_destroy_ah(&dev->edev, &params); } int efa_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) { struct rdma_ah_attr *ah_attr = init_attr->ah_attr; 
struct efa_dev *dev = to_edev(ibah->device); struct efa_com_create_ah_params params = {}; struct efa_ibv_create_ah_resp resp = {}; struct efa_com_create_ah_result result; struct efa_ah *ah = to_eah(ibah); int err; if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) { ibdev_dbg(&dev->ibdev, "Create address handle is not supported in atomic context\n"); err = -EOPNOTSUPP; goto err_out; } if (udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n"); err = -EINVAL; goto err_out; } memcpy(params.dest_addr, ah_attr->grh.dgid.raw, sizeof(params.dest_addr)); params.pdn = to_epd(ibah->pd)->pdn; err = efa_com_create_ah(&dev->edev, &params, &result); if (err) goto err_out; memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id)); ah->ah = result.ah; resp.efa_address_handle = result.ah; if (udata->outlen) { err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { ibdev_dbg(&dev->ibdev, "Failed to copy udata for create_ah response\n"); goto err_destroy_ah; } } ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah); return 0; err_destroy_ah: efa_ah_destroy(dev, ah); err_out: atomic64_inc(&dev->stats.create_ah_err); return err; } int efa_destroy_ah(struct ib_ah *ibah, u32 flags) { struct efa_dev *dev = to_edev(ibah->pd->device); struct efa_ah *ah = to_eah(ibah); ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah); if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) { ibdev_dbg(&dev->ibdev, "Destroy address handle is not supported in atomic context\n"); return -EOPNOTSUPP; } efa_ah_destroy(dev, ah); return 0; } struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { return rdma_alloc_hw_stats_struct(efa_port_stats_descs, ARRAY_SIZE(efa_port_stats_descs), RDMA_HW_STATS_DEFAULT_LIFESPAN); } struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev) { return rdma_alloc_hw_stats_struct(efa_device_stats_descs, ARRAY_SIZE(efa_device_stats_descs), RDMA_HW_STATS_DEFAULT_LIFESPAN); } static int efa_fill_device_stats(struct efa_dev *dev, struct rdma_hw_stats *stats) { struct efa_com_stats_admin *as = &dev->edev.aq.stats; struct efa_stats *s = &dev->stats; stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd); stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd); stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err); stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion); stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd); stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err); stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err); stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err); stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err); stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->alloc_ucontext_err); stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err); stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err); return ARRAY_SIZE(efa_device_stats_descs); } static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats, u32 port_num) { struct efa_com_get_stats_params params = {}; union efa_com_get_stats_result result; struct efa_com_rdma_write_stats *rws; struct efa_com_rdma_read_stats *rrs; struct efa_com_messages_stats *ms; struct efa_com_basic_stats *bs; int err; params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL; params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC; err = efa_com_get_stats(&dev->edev, &params, &result); if (err) return err; bs = 
&result.basic_stats; stats->value[EFA_TX_BYTES] = bs->tx_bytes; stats->value[EFA_TX_PKTS] = bs->tx_pkts; stats->value[EFA_RX_BYTES] = bs->rx_bytes; stats->value[EFA_RX_PKTS] = bs->rx_pkts; stats->value[EFA_RX_DROPS] = bs->rx_drops; params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES; err = efa_com_get_stats(&dev->edev, &params, &result); if (err) return err; ms = &result.messages_stats; stats->value[EFA_SEND_BYTES] = ms->send_bytes; stats->value[EFA_SEND_WRS] = ms->send_wrs; stats->value[EFA_RECV_BYTES] = ms->recv_bytes; stats->value[EFA_RECV_WRS] = ms->recv_wrs; params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ; err = efa_com_get_stats(&dev->edev, &params, &result); if (err) return err; rrs = &result.rdma_read_stats; stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs; stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes; stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err; stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes; if (EFA_DEV_CAP(dev, RDMA_WRITE)) { params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE; err = efa_com_get_stats(&dev->edev, &params, &result); if (err) return err; rws = &result.rdma_write_stats; stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs; stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes; stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err; stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes; } return ARRAY_SIZE(efa_port_stats_descs); } int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port_num, int index) { if (port_num) return efa_fill_port_stats(to_edev(ibdev), stats, port_num); else return efa_fill_device_stats(to_edev(ibdev), stats); } enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, u32 port_num) { return IB_LINK_LAYER_UNSPECIFIED; }
linux-master
drivers/infiniband/hw/efa/efa_verbs.c
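
efa_verbs.c above generates its hardware-stats tables with an X-macro: EFA_DEFINE_DEVICE_STATS() and EFA_DEFINE_PORT_STATS() name each counter exactly once, and the EFA_STATS_ENUM / EFA_STATS_STR expansions turn that single list into both the enum of indices and the rdma_stat_desc name array, so efa_fill_device_stats() and efa_fill_port_stats() can index by symbolic name without the two tables drifting apart. Below is a minimal stand-alone sketch of the same pattern; every DEMO_* identifier is invented for this illustration, and plain C strings stand in for struct rdma_stat_desc.

/* Hedged illustration of the X-macro stats-table pattern; DEMO_* names
 * are invented for this sketch and are not part of the driver.
 */
#include <stdio.h>

/* Single source of truth: one op() invocation per counter. */
#define DEMO_DEFINE_STATS(op)		\
	op(DEMO_TX_BYTES, "tx_bytes")	\
	op(DEMO_RX_BYTES, "rx_bytes")	\
	op(DEMO_RX_DROPS, "rx_drops")

/* First expansion: enum of indices, mirroring enum efa_hw_device_stats. */
#define DEMO_STATS_ENUM(ename, nam) ename,
enum demo_stats {
	DEMO_DEFINE_STATS(DEMO_STATS_ENUM)
	DEMO_STATS_COUNT
};

/* Second expansion: name table indexed by the enum, mirroring
 * efa_device_stats_descs (which uses struct rdma_stat_desc).
 */
#define DEMO_STATS_STR(ename, nam) [ename] = nam,
static const char *const demo_stats_names[] = {
	DEMO_DEFINE_STATS(DEMO_STATS_STR)
};

int main(void)
{
	unsigned long values[DEMO_STATS_COUNT] = { 0 };
	int i;

	values[DEMO_RX_DROPS] = 7;	/* fill one slot by symbolic index */
	for (i = 0; i < DEMO_STATS_COUNT; i++)
		printf("%s = %lu\n", demo_stats_names[i], values[i]);
	return 0;
}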